Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- wemm/lib/python3.10/site-packages/lit-18.1.8.dist-info/REQUESTED +0 -0
- wemm/lib/python3.10/site-packages/lit-18.1.8.dist-info/entry_points.txt +2 -0
- wemm/lib/python3.10/site-packages/lit-18.1.8.dist-info/top_level.txt +1 -0
- wemm/lib/python3.10/site-packages/networkx/__pycache__/convert.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/classes/__pycache__/digraph.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/classes/__pycache__/graphviews.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/classes/__pycache__/multidigraph.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/classes/function.py +1407 -0
- wemm/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_coreviews.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_digraph.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_graphviews.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_multidigraph.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/classes/tests/test_subgraphviews.py +362 -0
- wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/graph6.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/graphml.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/leda.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/p2g.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/text.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/networkx/readwrite/json_graph/cytoscape.py +178 -0
- wemm/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/nvidia_cublas_cu11-11.10.3.66.dist-info/REQUESTED +0 -0
- wemm/lib/python3.10/site-packages/simplejson-3.19.3.dist-info/WHEEL +8 -0
- wemm/lib/python3.10/site-packages/simplejson-3.19.3.dist-info/top_level.txt +1 -0
- wemm/lib/python3.10/site-packages/torchgen/__init__.py +10 -0
- wemm/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/__pycache__/model.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/cpp.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/lazy.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/translate.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/api/functionalization.py +175 -0
- wemm/lib/python3.10/site-packages/torchgen/api/meta.py +12 -0
- wemm/lib/python3.10/site-packages/torchgen/api/python.py +1476 -0
- wemm/lib/python3.10/site-packages/torchgen/api/ufunc.py +209 -0
- wemm/lib/python3.10/site-packages/torchgen/code_template.py +96 -0
- wemm/lib/python3.10/site-packages/torchgen/context.py +115 -0
- wemm/lib/python3.10/site-packages/torchgen/dest/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ir.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/dest/__pycache__/native_functions.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/dest/__pycache__/register_dispatch_key.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/dest/__pycache__/ufunc.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/dest/lazy_ts_lowering.py +48 -0
- wemm/lib/python3.10/site-packages/torchgen/dest/native_functions.py +64 -0
- wemm/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py +368 -0
- wemm/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py +2 -0
wemm/lib/python3.10/site-packages/lit-18.1.8.dist-info/REQUESTED
ADDED
|
File without changes
|
wemm/lib/python3.10/site-packages/lit-18.1.8.dist-info/entry_points.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[console_scripts]
|
| 2 |
+
lit = lit.main:main
|
wemm/lib/python3.10/site-packages/lit-18.1.8.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
lit
|
wemm/lib/python3.10/site-packages/networkx/__pycache__/convert.cpython-310.pyc
ADDED
|
Binary file (13.1 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/classes/__pycache__/digraph.cpython-310.pyc
ADDED
|
Binary file (46.5 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/classes/__pycache__/graphviews.cpython-310.pyc
ADDED
|
Binary file (8.12 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/classes/__pycache__/multidigraph.cpython-310.pyc
ADDED
|
Binary file (36 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/classes/function.py
ADDED
|
@@ -0,0 +1,1407 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Functional interface to graph methods and assorted utilities."""
|
| 2 |
+
|
| 3 |
+
from collections import Counter
|
| 4 |
+
from itertools import chain
|
| 5 |
+
|
| 6 |
+
import networkx as nx
|
| 7 |
+
from networkx.utils import not_implemented_for, pairwise
|
| 8 |
+
|
| 9 |
+
__all__ = [
|
| 10 |
+
"nodes",
|
| 11 |
+
"edges",
|
| 12 |
+
"degree",
|
| 13 |
+
"degree_histogram",
|
| 14 |
+
"neighbors",
|
| 15 |
+
"number_of_nodes",
|
| 16 |
+
"number_of_edges",
|
| 17 |
+
"density",
|
| 18 |
+
"is_directed",
|
| 19 |
+
"freeze",
|
| 20 |
+
"is_frozen",
|
| 21 |
+
"subgraph",
|
| 22 |
+
"induced_subgraph",
|
| 23 |
+
"edge_subgraph",
|
| 24 |
+
"restricted_view",
|
| 25 |
+
"to_directed",
|
| 26 |
+
"to_undirected",
|
| 27 |
+
"add_star",
|
| 28 |
+
"add_path",
|
| 29 |
+
"add_cycle",
|
| 30 |
+
"create_empty_copy",
|
| 31 |
+
"set_node_attributes",
|
| 32 |
+
"get_node_attributes",
|
| 33 |
+
"remove_node_attributes",
|
| 34 |
+
"set_edge_attributes",
|
| 35 |
+
"get_edge_attributes",
|
| 36 |
+
"remove_edge_attributes",
|
| 37 |
+
"all_neighbors",
|
| 38 |
+
"non_neighbors",
|
| 39 |
+
"non_edges",
|
| 40 |
+
"common_neighbors",
|
| 41 |
+
"is_weighted",
|
| 42 |
+
"is_negatively_weighted",
|
| 43 |
+
"is_empty",
|
| 44 |
+
"selfloop_edges",
|
| 45 |
+
"nodes_with_selfloops",
|
| 46 |
+
"number_of_selfloops",
|
| 47 |
+
"path_weight",
|
| 48 |
+
"is_path",
|
| 49 |
+
]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def nodes(G):
|
| 53 |
+
"""Returns a NodeView over the graph nodes.
|
| 54 |
+
|
| 55 |
+
This function wraps the :func:`G.nodes <networkx.Graph.nodes>` property.
|
| 56 |
+
"""
|
| 57 |
+
return G.nodes()
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def edges(G, nbunch=None):
|
| 61 |
+
"""Returns an edge view of edges incident to nodes in nbunch.
|
| 62 |
+
|
| 63 |
+
Return all edges if nbunch is unspecified or nbunch=None.
|
| 64 |
+
|
| 65 |
+
For digraphs, edges=out_edges
|
| 66 |
+
|
| 67 |
+
This function wraps the :func:`G.edges <networkx.Graph.edges>` property.
|
| 68 |
+
"""
|
| 69 |
+
return G.edges(nbunch)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def degree(G, nbunch=None, weight=None):
|
| 73 |
+
"""Returns a degree view of single node or of nbunch of nodes.
|
| 74 |
+
If nbunch is omitted, then return degrees of *all* nodes.
|
| 75 |
+
|
| 76 |
+
This function wraps the :func:`G.degree <networkx.Graph.degree>` property.
|
| 77 |
+
"""
|
| 78 |
+
return G.degree(nbunch, weight)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def neighbors(G, n):
|
| 82 |
+
"""Returns an iterator over all neighbors of node n.
|
| 83 |
+
|
| 84 |
+
This function wraps the :func:`G.neighbors <networkx.Graph.neighbors>` function.
|
| 85 |
+
"""
|
| 86 |
+
return G.neighbors(n)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def number_of_nodes(G):
|
| 90 |
+
"""Returns the number of nodes in the graph.
|
| 91 |
+
|
| 92 |
+
This function wraps the :func:`G.number_of_nodes <networkx.Graph.number_of_nodes>` function.
|
| 93 |
+
"""
|
| 94 |
+
return G.number_of_nodes()
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def number_of_edges(G):
|
| 98 |
+
"""Returns the number of edges in the graph.
|
| 99 |
+
|
| 100 |
+
This function wraps the :func:`G.number_of_edges <networkx.Graph.number_of_edges>` function.
|
| 101 |
+
"""
|
| 102 |
+
return G.number_of_edges()
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def density(G):
|
| 106 |
+
r"""Returns the density of a graph.
|
| 107 |
+
|
| 108 |
+
The density for undirected graphs is
|
| 109 |
+
|
| 110 |
+
.. math::
|
| 111 |
+
|
| 112 |
+
d = \frac{2m}{n(n-1)},
|
| 113 |
+
|
| 114 |
+
and for directed graphs is
|
| 115 |
+
|
| 116 |
+
.. math::
|
| 117 |
+
|
| 118 |
+
d = \frac{m}{n(n-1)},
|
| 119 |
+
|
| 120 |
+
where `n` is the number of nodes and `m` is the number of edges in `G`.
|
| 121 |
+
|
| 122 |
+
Notes
|
| 123 |
+
-----
|
| 124 |
+
The density is 0 for a graph without edges and 1 for a complete graph.
|
| 125 |
+
The density of multigraphs can be higher than 1.
|
| 126 |
+
|
| 127 |
+
Self loops are counted in the total number of edges so graphs with self
|
| 128 |
+
loops can have density higher than 1.
|
| 129 |
+
"""
|
| 130 |
+
n = number_of_nodes(G)
|
| 131 |
+
m = number_of_edges(G)
|
| 132 |
+
if m == 0 or n <= 1:
|
| 133 |
+
return 0
|
| 134 |
+
d = m / (n * (n - 1))
|
| 135 |
+
if not G.is_directed():
|
| 136 |
+
d *= 2
|
| 137 |
+
return d
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def degree_histogram(G):
|
| 141 |
+
"""Returns a list of the frequency of each degree value.
|
| 142 |
+
|
| 143 |
+
Parameters
|
| 144 |
+
----------
|
| 145 |
+
G : Networkx graph
|
| 146 |
+
A graph
|
| 147 |
+
|
| 148 |
+
Returns
|
| 149 |
+
-------
|
| 150 |
+
hist : list
|
| 151 |
+
A list of frequencies of degrees.
|
| 152 |
+
The degree values are the index in the list.
|
| 153 |
+
|
| 154 |
+
Notes
|
| 155 |
+
-----
|
| 156 |
+
Note: the bins are width one, hence len(list) can be large
|
| 157 |
+
(Order(number_of_edges))
|
| 158 |
+
"""
|
| 159 |
+
counts = Counter(d for n, d in G.degree())
|
| 160 |
+
return [counts.get(i, 0) for i in range(max(counts) + 1 if counts else 0)]
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def is_directed(G):
|
| 164 |
+
"""Return True if graph is directed."""
|
| 165 |
+
return G.is_directed()
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def frozen(*args, **kwargs):
|
| 169 |
+
"""Dummy method for raising errors when trying to modify frozen graphs"""
|
| 170 |
+
raise nx.NetworkXError("Frozen graph can't be modified")
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def freeze(G):
|
| 174 |
+
"""Modify graph to prevent further change by adding or removing
|
| 175 |
+
nodes or edges.
|
| 176 |
+
|
| 177 |
+
Node and edge data can still be modified.
|
| 178 |
+
|
| 179 |
+
Parameters
|
| 180 |
+
----------
|
| 181 |
+
G : graph
|
| 182 |
+
A NetworkX graph
|
| 183 |
+
|
| 184 |
+
Examples
|
| 185 |
+
--------
|
| 186 |
+
>>> G = nx.path_graph(4)
|
| 187 |
+
>>> G = nx.freeze(G)
|
| 188 |
+
>>> try:
|
| 189 |
+
... G.add_edge(4, 5)
|
| 190 |
+
... except nx.NetworkXError as err:
|
| 191 |
+
... print(str(err))
|
| 192 |
+
Frozen graph can't be modified
|
| 193 |
+
|
| 194 |
+
Notes
|
| 195 |
+
-----
|
| 196 |
+
To "unfreeze" a graph you must make a copy by creating a new graph object:
|
| 197 |
+
|
| 198 |
+
>>> graph = nx.path_graph(4)
|
| 199 |
+
>>> frozen_graph = nx.freeze(graph)
|
| 200 |
+
>>> unfrozen_graph = nx.Graph(frozen_graph)
|
| 201 |
+
>>> nx.is_frozen(unfrozen_graph)
|
| 202 |
+
False
|
| 203 |
+
|
| 204 |
+
See Also
|
| 205 |
+
--------
|
| 206 |
+
is_frozen
|
| 207 |
+
"""
|
| 208 |
+
G.add_node = frozen
|
| 209 |
+
G.add_nodes_from = frozen
|
| 210 |
+
G.remove_node = frozen
|
| 211 |
+
G.remove_nodes_from = frozen
|
| 212 |
+
G.add_edge = frozen
|
| 213 |
+
G.add_edges_from = frozen
|
| 214 |
+
G.add_weighted_edges_from = frozen
|
| 215 |
+
G.remove_edge = frozen
|
| 216 |
+
G.remove_edges_from = frozen
|
| 217 |
+
G.clear = frozen
|
| 218 |
+
G.clear_edges = frozen
|
| 219 |
+
G.frozen = True
|
| 220 |
+
return G
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def is_frozen(G):
|
| 224 |
+
"""Returns True if graph is frozen.
|
| 225 |
+
|
| 226 |
+
Parameters
|
| 227 |
+
----------
|
| 228 |
+
G : graph
|
| 229 |
+
A NetworkX graph
|
| 230 |
+
|
| 231 |
+
See Also
|
| 232 |
+
--------
|
| 233 |
+
freeze
|
| 234 |
+
"""
|
| 235 |
+
try:
|
| 236 |
+
return G.frozen
|
| 237 |
+
except AttributeError:
|
| 238 |
+
return False
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def add_star(G_to_add_to, nodes_for_star, **attr):
|
| 242 |
+
"""Add a star to Graph G_to_add_to.
|
| 243 |
+
|
| 244 |
+
The first node in `nodes_for_star` is the middle of the star.
|
| 245 |
+
It is connected to all other nodes.
|
| 246 |
+
|
| 247 |
+
Parameters
|
| 248 |
+
----------
|
| 249 |
+
G_to_add_to : graph
|
| 250 |
+
A NetworkX graph
|
| 251 |
+
nodes_for_star : iterable container
|
| 252 |
+
A container of nodes.
|
| 253 |
+
attr : keyword arguments, optional (default= no attributes)
|
| 254 |
+
Attributes to add to every edge in star.
|
| 255 |
+
|
| 256 |
+
See Also
|
| 257 |
+
--------
|
| 258 |
+
add_path, add_cycle
|
| 259 |
+
|
| 260 |
+
Examples
|
| 261 |
+
--------
|
| 262 |
+
>>> G = nx.Graph()
|
| 263 |
+
>>> nx.add_star(G, [0, 1, 2, 3])
|
| 264 |
+
>>> nx.add_star(G, [10, 11, 12], weight=2)
|
| 265 |
+
"""
|
| 266 |
+
nlist = iter(nodes_for_star)
|
| 267 |
+
try:
|
| 268 |
+
v = next(nlist)
|
| 269 |
+
except StopIteration:
|
| 270 |
+
return
|
| 271 |
+
G_to_add_to.add_node(v)
|
| 272 |
+
edges = ((v, n) for n in nlist)
|
| 273 |
+
G_to_add_to.add_edges_from(edges, **attr)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def add_path(G_to_add_to, nodes_for_path, **attr):
|
| 277 |
+
"""Add a path to the Graph G_to_add_to.
|
| 278 |
+
|
| 279 |
+
Parameters
|
| 280 |
+
----------
|
| 281 |
+
G_to_add_to : graph
|
| 282 |
+
A NetworkX graph
|
| 283 |
+
nodes_for_path : iterable container
|
| 284 |
+
A container of nodes. A path will be constructed from
|
| 285 |
+
the nodes (in order) and added to the graph.
|
| 286 |
+
attr : keyword arguments, optional (default= no attributes)
|
| 287 |
+
Attributes to add to every edge in path.
|
| 288 |
+
|
| 289 |
+
See Also
|
| 290 |
+
--------
|
| 291 |
+
add_star, add_cycle
|
| 292 |
+
|
| 293 |
+
Examples
|
| 294 |
+
--------
|
| 295 |
+
>>> G = nx.Graph()
|
| 296 |
+
>>> nx.add_path(G, [0, 1, 2, 3])
|
| 297 |
+
>>> nx.add_path(G, [10, 11, 12], weight=7)
|
| 298 |
+
"""
|
| 299 |
+
nlist = iter(nodes_for_path)
|
| 300 |
+
try:
|
| 301 |
+
first_node = next(nlist)
|
| 302 |
+
except StopIteration:
|
| 303 |
+
return
|
| 304 |
+
G_to_add_to.add_node(first_node)
|
| 305 |
+
G_to_add_to.add_edges_from(pairwise(chain((first_node,), nlist)), **attr)
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def add_cycle(G_to_add_to, nodes_for_cycle, **attr):
|
| 309 |
+
"""Add a cycle to the Graph G_to_add_to.
|
| 310 |
+
|
| 311 |
+
Parameters
|
| 312 |
+
----------
|
| 313 |
+
G_to_add_to : graph
|
| 314 |
+
A NetworkX graph
|
| 315 |
+
nodes_for_cycle: iterable container
|
| 316 |
+
A container of nodes. A cycle will be constructed from
|
| 317 |
+
the nodes (in order) and added to the graph.
|
| 318 |
+
attr : keyword arguments, optional (default= no attributes)
|
| 319 |
+
Attributes to add to every edge in cycle.
|
| 320 |
+
|
| 321 |
+
See Also
|
| 322 |
+
--------
|
| 323 |
+
add_path, add_star
|
| 324 |
+
|
| 325 |
+
Examples
|
| 326 |
+
--------
|
| 327 |
+
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
|
| 328 |
+
>>> nx.add_cycle(G, [0, 1, 2, 3])
|
| 329 |
+
>>> nx.add_cycle(G, [10, 11, 12], weight=7)
|
| 330 |
+
"""
|
| 331 |
+
nlist = iter(nodes_for_cycle)
|
| 332 |
+
try:
|
| 333 |
+
first_node = next(nlist)
|
| 334 |
+
except StopIteration:
|
| 335 |
+
return
|
| 336 |
+
G_to_add_to.add_node(first_node)
|
| 337 |
+
G_to_add_to.add_edges_from(
|
| 338 |
+
pairwise(chain((first_node,), nlist), cyclic=True), **attr
|
| 339 |
+
)
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def subgraph(G, nbunch):
|
| 343 |
+
"""Returns the subgraph induced on nodes in nbunch.
|
| 344 |
+
|
| 345 |
+
Parameters
|
| 346 |
+
----------
|
| 347 |
+
G : graph
|
| 348 |
+
A NetworkX graph
|
| 349 |
+
|
| 350 |
+
nbunch : list, iterable
|
| 351 |
+
A container of nodes that will be iterated through once (thus
|
| 352 |
+
it should be an iterator or be iterable). Each element of the
|
| 353 |
+
container should be a valid node type: any hashable type except
|
| 354 |
+
None. If nbunch is None, return all edges data in the graph.
|
| 355 |
+
Nodes in nbunch that are not in the graph will be (quietly)
|
| 356 |
+
ignored.
|
| 357 |
+
|
| 358 |
+
Notes
|
| 359 |
+
-----
|
| 360 |
+
subgraph(G) calls G.subgraph()
|
| 361 |
+
"""
|
| 362 |
+
return G.subgraph(nbunch)
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
def induced_subgraph(G, nbunch):
|
| 366 |
+
"""Returns a SubGraph view of `G` showing only nodes in nbunch.
|
| 367 |
+
|
| 368 |
+
The induced subgraph of a graph on a set of nodes N is the
|
| 369 |
+
graph with nodes N and edges from G which have both ends in N.
|
| 370 |
+
|
| 371 |
+
Parameters
|
| 372 |
+
----------
|
| 373 |
+
G : NetworkX Graph
|
| 374 |
+
nbunch : node, container of nodes or None (for all nodes)
|
| 375 |
+
|
| 376 |
+
Returns
|
| 377 |
+
-------
|
| 378 |
+
subgraph : SubGraph View
|
| 379 |
+
A read-only view of the subgraph in `G` induced by the nodes.
|
| 380 |
+
Changes to the graph `G` will be reflected in the view.
|
| 381 |
+
|
| 382 |
+
Notes
|
| 383 |
+
-----
|
| 384 |
+
To create a mutable subgraph with its own copies of nodes
|
| 385 |
+
edges and attributes use `subgraph.copy()` or `Graph(subgraph)`
|
| 386 |
+
|
| 387 |
+
For an inplace reduction of a graph to a subgraph you can remove nodes:
|
| 388 |
+
`G.remove_nodes_from(n in G if n not in set(nbunch))`
|
| 389 |
+
|
| 390 |
+
If you are going to compute subgraphs of your subgraphs you could
|
| 391 |
+
end up with a chain of views that can be very slow once the chain
|
| 392 |
+
has about 15 views in it. If they are all induced subgraphs, you
|
| 393 |
+
can short-cut the chain by making them all subgraphs of the original
|
| 394 |
+
graph. The graph class method `G.subgraph` does this when `G` is
|
| 395 |
+
a subgraph. In contrast, this function allows you to choose to build
|
| 396 |
+
chains or not, as you wish. The returned subgraph is a view on `G`.
|
| 397 |
+
|
| 398 |
+
Examples
|
| 399 |
+
--------
|
| 400 |
+
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
|
| 401 |
+
>>> H = nx.induced_subgraph(G, [0, 1, 3])
|
| 402 |
+
>>> list(H.edges)
|
| 403 |
+
[(0, 1)]
|
| 404 |
+
>>> list(H.nodes)
|
| 405 |
+
[0, 1, 3]
|
| 406 |
+
"""
|
| 407 |
+
induced_nodes = nx.filters.show_nodes(G.nbunch_iter(nbunch))
|
| 408 |
+
return nx.subgraph_view(G, filter_node=induced_nodes)
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
def edge_subgraph(G, edges):
|
| 412 |
+
"""Returns a view of the subgraph induced by the specified edges.
|
| 413 |
+
|
| 414 |
+
The induced subgraph contains each edge in `edges` and each
|
| 415 |
+
node incident to any of those edges.
|
| 416 |
+
|
| 417 |
+
Parameters
|
| 418 |
+
----------
|
| 419 |
+
G : NetworkX Graph
|
| 420 |
+
edges : iterable
|
| 421 |
+
An iterable of edges. Edges not present in `G` are ignored.
|
| 422 |
+
|
| 423 |
+
Returns
|
| 424 |
+
-------
|
| 425 |
+
subgraph : SubGraph View
|
| 426 |
+
A read-only edge-induced subgraph of `G`.
|
| 427 |
+
Changes to `G` are reflected in the view.
|
| 428 |
+
|
| 429 |
+
Notes
|
| 430 |
+
-----
|
| 431 |
+
To create a mutable subgraph with its own copies of nodes
|
| 432 |
+
edges and attributes use `subgraph.copy()` or `Graph(subgraph)`
|
| 433 |
+
|
| 434 |
+
If you create a subgraph of a subgraph recursively you can end up
|
| 435 |
+
with a chain of subgraphs that becomes very slow with about 15
|
| 436 |
+
nested subgraph views. Luckily the edge_subgraph filter nests
|
| 437 |
+
nicely so you can use the original graph as G in this function
|
| 438 |
+
to avoid chains. We do not rule out chains programmatically so
|
| 439 |
+
that odd cases like an `edge_subgraph` of a `restricted_view`
|
| 440 |
+
can be created.
|
| 441 |
+
|
| 442 |
+
Examples
|
| 443 |
+
--------
|
| 444 |
+
>>> G = nx.path_graph(5)
|
| 445 |
+
>>> H = G.edge_subgraph([(0, 1), (3, 4)])
|
| 446 |
+
>>> list(H.nodes)
|
| 447 |
+
[0, 1, 3, 4]
|
| 448 |
+
>>> list(H.edges)
|
| 449 |
+
[(0, 1), (3, 4)]
|
| 450 |
+
"""
|
| 451 |
+
nxf = nx.filters
|
| 452 |
+
edges = set(edges)
|
| 453 |
+
nodes = set()
|
| 454 |
+
for e in edges:
|
| 455 |
+
nodes.update(e[:2])
|
| 456 |
+
induced_nodes = nxf.show_nodes(nodes)
|
| 457 |
+
if G.is_multigraph():
|
| 458 |
+
if G.is_directed():
|
| 459 |
+
induced_edges = nxf.show_multidiedges(edges)
|
| 460 |
+
else:
|
| 461 |
+
induced_edges = nxf.show_multiedges(edges)
|
| 462 |
+
else:
|
| 463 |
+
if G.is_directed():
|
| 464 |
+
induced_edges = nxf.show_diedges(edges)
|
| 465 |
+
else:
|
| 466 |
+
induced_edges = nxf.show_edges(edges)
|
| 467 |
+
return nx.subgraph_view(G, filter_node=induced_nodes, filter_edge=induced_edges)
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
def restricted_view(G, nodes, edges):
|
| 471 |
+
"""Returns a view of `G` with hidden nodes and edges.
|
| 472 |
+
|
| 473 |
+
The resulting subgraph filters out node `nodes` and edges `edges`.
|
| 474 |
+
Filtered out nodes also filter out any of their edges.
|
| 475 |
+
|
| 476 |
+
Parameters
|
| 477 |
+
----------
|
| 478 |
+
G : NetworkX Graph
|
| 479 |
+
nodes : iterable
|
| 480 |
+
An iterable of nodes. Nodes not present in `G` are ignored.
|
| 481 |
+
edges : iterable
|
| 482 |
+
An iterable of edges. Edges not present in `G` are ignored.
|
| 483 |
+
|
| 484 |
+
Returns
|
| 485 |
+
-------
|
| 486 |
+
subgraph : SubGraph View
|
| 487 |
+
A read-only restricted view of `G` filtering out nodes and edges.
|
| 488 |
+
Changes to `G` are reflected in the view.
|
| 489 |
+
|
| 490 |
+
Notes
|
| 491 |
+
-----
|
| 492 |
+
To create a mutable subgraph with its own copies of nodes
|
| 493 |
+
edges and attributes use `subgraph.copy()` or `Graph(subgraph)`
|
| 494 |
+
|
| 495 |
+
If you create a subgraph of a subgraph recursively you may end up
|
| 496 |
+
with a chain of subgraph views. Such chains can get quite slow
|
| 497 |
+
for lengths near 15. To avoid long chains, try to make your subgraph
|
| 498 |
+
based on the original graph. We do not rule out chains programmatically
|
| 499 |
+
so that odd cases like an `edge_subgraph` of a `restricted_view`
|
| 500 |
+
can be created.
|
| 501 |
+
|
| 502 |
+
Examples
|
| 503 |
+
--------
|
| 504 |
+
>>> G = nx.path_graph(5)
|
| 505 |
+
>>> H = nx.restricted_view(G, [0], [(1, 2), (3, 4)])
|
| 506 |
+
>>> list(H.nodes)
|
| 507 |
+
[1, 2, 3, 4]
|
| 508 |
+
>>> list(H.edges)
|
| 509 |
+
[(2, 3)]
|
| 510 |
+
"""
|
| 511 |
+
nxf = nx.filters
|
| 512 |
+
hide_nodes = nxf.hide_nodes(nodes)
|
| 513 |
+
if G.is_multigraph():
|
| 514 |
+
if G.is_directed():
|
| 515 |
+
hide_edges = nxf.hide_multidiedges(edges)
|
| 516 |
+
else:
|
| 517 |
+
hide_edges = nxf.hide_multiedges(edges)
|
| 518 |
+
else:
|
| 519 |
+
if G.is_directed():
|
| 520 |
+
hide_edges = nxf.hide_diedges(edges)
|
| 521 |
+
else:
|
| 522 |
+
hide_edges = nxf.hide_edges(edges)
|
| 523 |
+
return nx.subgraph_view(G, filter_node=hide_nodes, filter_edge=hide_edges)
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
def to_directed(graph):
    """Return a read-only directed view of the graph `graph`.

    Equivalent to ``graph.to_directed(as_view=True)``.  Note that the
    ``to_directed`` *method* defaults to ``as_view=False`` (which makes a
    copy), whereas this function always produces a view.
    """
    directed_view = graph.to_directed(as_view=True)
    return directed_view
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
def to_undirected(graph):
    """Return a read-only undirected view of the graph `graph`.

    Equivalent to ``graph.to_undirected(as_view=True)``.  Note that the
    ``to_undirected`` *method* defaults to ``as_view=False`` (which makes a
    copy), whereas this function always produces a view.
    """
    undirected_view = graph.to_undirected(as_view=True)
    return undirected_view
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def create_empty_copy(G, with_data=True):
    """Return a copy of the graph `G` with all of the edges removed.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    with_data : bool (default=True)
        Propagate Graph and Nodes data to the new graph.

    See Also
    --------
    empty_graph

    """
    # Instantiate the same graph class so Graph/DiGraph/Multi* is preserved.
    empty = G.__class__()
    empty.add_nodes_from(G.nodes(data=with_data))
    if with_data:
        # Graph-level attributes are copied only when data propagation is on.
        empty.graph.update(G.graph)
    return empty
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
def set_node_attributes(G, values, name=None):
    """Sets node attributes from a given value or dictionary of values.

    .. Warning:: The call order of arguments `values` and `name`
        switched between v1.x & v2.x.

    Parameters
    ----------
    G : NetworkX Graph

    values : scalar value, dict-like
        What the node attribute should be set to.  If `values` is
        not a dictionary, then it is treated as a single attribute value
        that is then applied to every node in `G`.  This means that if
        you provide a mutable object, like a list, updates to that object
        will be reflected in the node attribute for every node.
        The attribute name will be `name`.

        If `values` is a dict or a dict of dict, it should be keyed
        by node to either an attribute value or a dict of attribute key/value
        pairs used to update the node's attributes.

    name : string (optional, default=None)
        Name of the node attribute to set if values is a scalar.

    Examples
    --------
    After computing some property of the nodes of a graph, you may want
    to assign a node attribute to store the value of that property for
    each node::

        >>> G = nx.path_graph(3)
        >>> bb = nx.betweenness_centrality(G)
        >>> isinstance(bb, dict)
        True
        >>> nx.set_node_attributes(G, bb, "betweenness")
        >>> G.nodes[1]["betweenness"]
        1.0

    If you provide a list as the second argument, updates to the list
    will be reflected in the node attribute for each node::

        >>> G = nx.path_graph(3)
        >>> labels = []
        >>> nx.set_node_attributes(G, labels, "labels")
        >>> labels.append("foo")
        >>> G.nodes[0]["labels"]
        ['foo']
        >>> G.nodes[1]["labels"]
        ['foo']
        >>> G.nodes[2]["labels"]
        ['foo']

    If you provide a dictionary of dictionaries as the second argument,
    the outer dictionary is assumed to be keyed by node to an inner
    dictionary of node attributes for that node::

        >>> G = nx.path_graph(3)
        >>> attrs = {0: {"attr1": 20, "attr2": "nothing"}, 1: {"attr2": 3}}
        >>> nx.set_node_attributes(G, attrs)
        >>> G.nodes[0]["attr1"]
        20
        >>> G.nodes[0]["attr2"]
        'nothing'
        >>> G.nodes[1]["attr2"]
        3
        >>> G.nodes[2]
        {}

    Note that if the dictionary contains nodes that are not in `G`, the
    values are silently ignored::

        >>> G = nx.Graph()
        >>> G.add_node(0)
        >>> nx.set_node_attributes(G, {0: "red", 1: "blue"}, name="color")
        >>> G.nodes[0]["color"]
        'red'
        >>> 1 in G.nodes
        False

    """
    # Set node attributes based on type of `values`
    if name is not None:  # `values` must not be a dict of dict
        try:  # `values` is a dict: {node: attribute value}
            for n, v in values.items():
                try:
                    # Use the already-bound `v` instead of a second
                    # `values[n]` lookup (same value, one lookup fewer).
                    G.nodes[n][name] = v
                except KeyError:
                    # Nodes missing from G are silently skipped (documented).
                    pass
        except AttributeError:  # `values` has no .items(): it is a constant
            for n in G:
                G.nodes[n][name] = values
    else:  # `values` must be dict of dict: {node: {attr: value}}
        for n, d in values.items():
            try:
                G.nodes[n].update(d)
            except KeyError:
                # Nodes missing from G are silently skipped (documented).
                pass
    nx._clear_cache(G)
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
def get_node_attributes(G, name, default=None):
    """Get node attributes from graph

    Parameters
    ----------
    G : NetworkX Graph

    name : string
        Attribute name

    default: object (default=None)
        Default value of the node attribute if there is no value set for that
        node in graph. If `None` then nodes without this attribute are not
        included in the returned dict.

    Returns
    -------
    Dictionary of attributes keyed by node.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_nodes_from([1, 2, 3], color="red")
    >>> color = nx.get_node_attributes(G, "color")
    >>> color[1]
    'red'
    >>> G.add_node(4)
    >>> color = nx.get_node_attributes(G, "color", default="yellow")
    >>> color[4]
    'yellow'
    """
    node_items = G.nodes.items()
    if default is None:
        # No default: only nodes that actually carry the attribute appear.
        return {node: data[name] for node, data in node_items if name in data}
    # With a default: every node appears, falling back to `default`.
    return {node: data.get(name, default) for node, data in node_items}
|
| 704 |
+
|
| 705 |
+
|
| 706 |
+
def remove_node_attributes(G, *attr_names, nbunch=None):
    """Remove node attributes from all nodes in the graph.

    Parameters
    ----------
    G : NetworkX Graph

    *attr_names : List of Strings
        The attribute names to remove from the graph.

    nbunch : List of Nodes
        Remove the node attributes only from the nodes in this list.
        If ``None`` (the default), remove them from every node.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_nodes_from([1, 2, 3], color="blue")
    >>> nx.get_node_attributes(G, "color")
    {1: 'blue', 2: 'blue', 3: 'blue'}
    >>> nx.remove_node_attributes(G, "color")
    >>> nx.get_node_attributes(G, "color")
    {}
    """
    if nbunch is None:
        nbunch = G.nodes()
    else:
        # Materialize as a set: makes the per-node membership test O(1)
        # (it was O(len(nbunch)) for a list) and makes a one-shot iterator
        # argument safe (repeated `in` tests would otherwise consume it).
        nbunch = set(nbunch)

    # Single pass over the nodes instead of one full pass per attribute.
    for n, d in G.nodes(data=True):
        if n in nbunch:
            for attr in attr_names:
                # pop with default: missing attributes are silently ignored.
                d.pop(attr, None)
|
| 740 |
+
|
| 741 |
+
|
| 742 |
+
def set_edge_attributes(G, values, name=None):
    """Sets edge attributes from a given value or dictionary of values.

    .. Warning:: The call order of arguments `values` and `name`
        switched between v1.x & v2.x.

    Parameters
    ----------
    G : NetworkX Graph

    values : scalar value, dict-like
        What the edge attribute should be set to.  If `values` is
        not a dictionary, then it is treated as a single attribute value
        that is then applied to every edge in `G`.  This means that if
        you provide a mutable object, like a list, updates to that object
        will be reflected in the edge attribute for each edge.  The attribute
        name will be `name`.

        If `values` is a dict or a dict of dict, it should be keyed
        by edge tuple to either an attribute value or a dict of attribute
        key/value pairs used to update the edge's attributes.
        For multigraphs, the edge tuples must be of the form ``(u, v, key)``,
        where `u` and `v` are nodes and `key` is the edge key.
        For non-multigraphs, the keys must be tuples of the form ``(u, v)``.

    name : string (optional, default=None)
        Name of the edge attribute to set if values is a scalar.

    Examples
    --------
    After computing some property of the edges of a graph, you may want
    to assign a edge attribute to store the value of that property for
    each edge::

        >>> G = nx.path_graph(3)
        >>> bb = nx.edge_betweenness_centrality(G, normalized=False)
        >>> nx.set_edge_attributes(G, bb, "betweenness")
        >>> G.edges[1, 2]["betweenness"]
        2.0

    If you provide a list as the second argument, updates to the list
    will be reflected in the edge attribute for each edge::

        >>> labels = []
        >>> nx.set_edge_attributes(G, labels, "labels")
        >>> labels.append("foo")
        >>> G.edges[0, 1]["labels"]
        ['foo']
        >>> G.edges[1, 2]["labels"]
        ['foo']

    If you provide a dictionary of dictionaries as the second argument,
    the entire dictionary will be used to update edge attributes::

        >>> G = nx.path_graph(3)
        >>> attrs = {(0, 1): {"attr1": 20, "attr2": "nothing"}, (1, 2): {"attr2": 3}}
        >>> nx.set_edge_attributes(G, attrs)
        >>> G[0][1]["attr1"]
        20
        >>> G[0][1]["attr2"]
        'nothing'
        >>> G[1][2]["attr2"]
        3

    The attributes of one Graph can be used to set those of another.

        >>> H = nx.path_graph(3)
        >>> nx.set_edge_attributes(H, G.edges)

    Note that if the dict contains edges that are not in `G`, they are
    silently ignored::

        >>> G = nx.Graph([(0, 1)])
        >>> nx.set_edge_attributes(G, {(1, 2): {"weight": 2.0}})
        >>> (1, 2) in G.edges()
        False

    For multigraphs, the `values` dict is expected to be keyed by 3-tuples
    including the edge key::

        >>> MG = nx.MultiGraph()
        >>> edges = [(0, 1), (0, 1)]
        >>> MG.add_edges_from(edges)  # Returns list of edge keys
        [0, 1]
        >>> attributes = {(0, 1, 0): {"cost": 21}, (0, 1, 1): {"cost": 7}}
        >>> nx.set_edge_attributes(MG, attributes)
        >>> MG[0][1][0]["cost"]
        21
        >>> MG[0][1][1]["cost"]
        7

    If MultiGraph attributes are desired for a Graph, you must convert the 3-tuple
    multiedge to a 2-tuple edge and the last multiedge's attribute value will
    overwrite the previous values. Continuing from the previous case we get::

        >>> H = nx.path_graph([0, 1, 2])
        >>> nx.set_edge_attributes(H, {(u, v): ed for u, v, ed in MG.edges.data()})
        >>> nx.get_edge_attributes(H, "cost")
        {(0, 1): 7}

    """
    if name is not None:
        # `values` does not contain attribute names; `name` supplies it.
        try:
            # if `values` is a dict using `.items()` => {edge: value}
            if G.is_multigraph():
                for (u, v, key), value in values.items():
                    try:
                        G._adj[u][v][key][name] = value
                    except KeyError:
                        # Edges missing from G are silently ignored (documented).
                        pass
            else:
                for (u, v), value in values.items():
                    try:
                        G._adj[u][v][name] = value
                    except KeyError:
                        # Edges missing from G are silently ignored (documented).
                        pass
        except AttributeError:
            # `values` has no .items(): treat `values` as a constant
            for u, v, data in G.edges(data=True):
                data[name] = values
    else:
        # `values` consists of dict-of-dict {edge: {attr: value}} shape
        if G.is_multigraph():
            for (u, v, key), d in values.items():
                try:
                    G._adj[u][v][key].update(d)
                except KeyError:
                    # Edges missing from G are silently ignored (documented).
                    pass
        else:
            for (u, v), d in values.items():
                try:
                    G._adj[u][v].update(d)
                except KeyError:
                    # Edges missing from G are silently ignored (documented).
                    pass
    nx._clear_cache(G)
|
| 878 |
+
|
| 879 |
+
|
| 880 |
+
def get_edge_attributes(G, name, default=None):
    """Get edge attributes from graph

    Parameters
    ----------
    G : NetworkX Graph

    name : string
        Attribute name

    default: object (default=None)
        Default value of the edge attribute if there is no value set for that
        edge in graph. If `None` then edges without this attribute are not
        included in the returned dict.

    Returns
    -------
    Dictionary of attributes keyed by edge. For (di)graphs, the keys are
    2-tuples of the form: (u, v). For multi(di)graphs, the keys are 3-tuples of
    the form: (u, v, key).

    Examples
    --------
    >>> G = nx.Graph()
    >>> nx.add_path(G, [1, 2, 3], color="red")
    >>> color = nx.get_edge_attributes(G, "color")
    >>> color[(1, 2)]
    'red'
    >>> G.add_edge(3, 4)
    >>> color = nx.get_edge_attributes(G, "color", default="yellow")
    >>> color[(3, 4)]
    'yellow'
    """
    # Multigraph edges carry a key, so report (u, v, key); otherwise (u, v).
    edges = G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True)
    if default is None:
        # No default: only edges that actually carry the attribute appear.
        return {edge[:-1]: edge[-1][name] for edge in edges if name in edge[-1]}
    # With a default: every edge appears, falling back to `default`.
    return {edge[:-1]: edge[-1].get(name, default) for edge in edges}
|
| 920 |
+
|
| 921 |
+
|
| 922 |
+
def remove_edge_attributes(G, *attr_names, ebunch=None):
    """Remove edge attributes from all edges in the graph.

    Parameters
    ----------
    G : NetworkX Graph

    *attr_names : List of Strings
        The attribute names to remove from the graph.

    ebunch : List of Edges
        Remove the edge attributes only from the edges in this list.
        For multigraphs the edges are ``(u, v, key)`` 3-tuples, otherwise
        ``(u, v)`` 2-tuples.  If ``None`` (the default), remove them from
        every edge.

    Examples
    --------
    >>> G = nx.path_graph(3)
    >>> nx.set_edge_attributes(G, {(u, v): u + v for u, v in G.edges()}, name="weight")
    >>> nx.get_edge_attributes(G, "weight")
    {(0, 1): 1, (1, 2): 3}
    >>> nx.remove_edge_attributes(G, "weight")
    >>> nx.get_edge_attributes(G, "weight")
    {}
    """
    if ebunch is None:
        # Edge views support O(1) membership tests directly.
        ebunch = G.edges(keys=True) if G.is_multigraph() else G.edges()
    else:
        # Materialize as a set: makes the per-edge membership test O(1)
        # (it was O(len(ebunch)) for a list) and makes a one-shot iterator
        # argument safe (repeated `in` tests would otherwise consume it).
        ebunch = set(ebunch)

    # Single pass over the edges instead of one full pass per attribute.
    edges = (
        G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True)
    )
    for *e, d in edges:
        if tuple(e) in ebunch:
            for attr in attr_names:
                # pop with default: missing attributes are silently ignored.
                d.pop(attr, None)
|
| 955 |
+
|
| 956 |
+
|
| 957 |
+
def all_neighbors(graph, node):
    """Returns all of the neighbors of a node in the graph.

    If the graph is directed returns predecessors as well as successors.

    Parameters
    ----------
    graph : NetworkX graph
        Graph to find neighbors.

    node : node
        The node whose neighbors will be returned.

    Returns
    -------
    neighbors : iterator
        Iterator of neighbors
    """
    if not graph.is_directed():
        return graph.neighbors(node)
    # Directed: both in-neighbors and out-neighbors count as neighbors.
    return chain(graph.predecessors(node), graph.successors(node))
|
| 980 |
+
|
| 981 |
+
|
| 982 |
+
def non_neighbors(graph, node):
    """Returns the non-neighbors of the node in the graph.

    Parameters
    ----------
    graph : NetworkX graph
        Graph to find neighbors.

    node : node
        The node whose neighbors will be returned.

    Returns
    -------
    non_neighbors : set
        Set of nodes in the graph that are not neighbors of the node.
    """
    adjacency = graph._adj
    # Everything that is not adjacent to `node` and is not `node` itself.
    excluded = set(adjacency[node])
    excluded.add(node)
    return adjacency.keys() - excluded
|
| 999 |
+
|
| 1000 |
+
|
| 1001 |
+
def non_edges(graph):
    """Returns the nonexistent edges in the graph.

    Parameters
    ----------
    graph : NetworkX graph.
        Graph to find nonexistent edges.

    Returns
    -------
    non_edges : iterator
        Iterator of edges that are not in the graph.
    """
    if graph.is_directed():
        # Directed: (u, v) and (v, u) are distinct, so check every ordered pair.
        for u in graph:
            yield from ((u, v) for v in non_neighbors(graph, u))
    else:
        # Undirected: shrink the candidate pool as we go so each unordered
        # pair is reported exactly once.
        remaining = set(graph)
        while remaining:
            u = remaining.pop()
            for v in remaining - set(graph[u]):
                yield (u, v)
|
| 1024 |
+
|
| 1025 |
+
|
| 1026 |
+
@not_implemented_for("directed")
def common_neighbors(G, u, v):
    """Returns the common neighbors of two nodes in a graph.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    u, v : nodes
        Nodes in the graph.

    Returns
    -------
    cnbors : set
        Set of common neighbors of u and v in the graph.

    Raises
    ------
    NetworkXError
        If u or v is not a node in the graph.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> sorted(nx.common_neighbors(G, 0, 1))
    [2, 3, 4]
    """
    if u not in G:
        raise nx.NetworkXError("u is not in the graph.")
    if v not in G:
        raise nx.NetworkXError("v is not in the graph.")

    # Intersect the two adjacency key-views, excluding the endpoints
    # themselves (relevant when u and v are adjacent to each other).
    neighbors_of_v = G._adj[v].keys() - {u, v}
    return G._adj[u].keys() & neighbors_of_v
|
| 1060 |
+
|
| 1061 |
+
|
| 1062 |
+
def is_weighted(G, edge=None, weight="weight"):
    """Returns True if `G` has weighted edges.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    edge : tuple, optional
        A 2-tuple specifying the only edge in `G` that will be tested. If
        None, then every edge in `G` is tested.

    weight: string, optional
        The attribute name used to query for edge weights.

    Returns
    -------
    bool
        A boolean signifying if `G`, or the specified edge, is weighted.

    Raises
    ------
    NetworkXError
        If the specified edge does not exist.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> nx.is_weighted(G)
    False
    >>> nx.is_weighted(G, (2, 3))
    False

    >>> G = nx.DiGraph()
    >>> G.add_edge(1, 2, weight=1)
    >>> nx.is_weighted(G)
    True

    """
    if edge is not None:
        edge_data = G.get_edge_data(*edge)
        if edge_data is None:
            raise nx.NetworkXError(f"Edge {edge!r} does not exist.")
        return weight in edge_data

    # all() over an empty iterable is True, so the edgeless case needs a guard.
    if is_empty(G):
        return False

    return all(weight in d for _, _, d in G.edges(data=True))
|
| 1113 |
+
|
| 1114 |
+
|
| 1115 |
+
@nx._dispatchable(edge_attrs="weight")
def is_negatively_weighted(G, edge=None, weight="weight"):
    """Returns True if `G` has negatively weighted edges.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    edge : tuple, optional
        A 2-tuple specifying the only edge in `G` that will be tested. If
        None, then every edge in `G` is tested.

    weight: string, optional
        The attribute name used to query for edge weights.

    Returns
    -------
    bool
        A boolean signifying if `G`, or the specified edge, is negatively
        weighted.

    Raises
    ------
    NetworkXError
        If the specified edge does not exist.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_edges_from([(1, 3), (2, 4), (2, 6)])
    >>> G.add_edge(1, 2, weight=4)
    >>> nx.is_negatively_weighted(G, (1, 2))
    False
    >>> G[2][4]["weight"] = -2
    >>> nx.is_negatively_weighted(G)
    True
    >>> G = nx.DiGraph()
    >>> edges = [("0", "3", 3), ("0", "1", -5), ("1", "0", -2)]
    >>> G.add_weighted_edges_from(edges)
    >>> nx.is_negatively_weighted(G)
    True

    """
    if edge is not None:
        edge_data = G.get_edge_data(*edge)
        if edge_data is None:
            raise nx.NetworkXError(f"Edge {edge!r} does not exist.")
        # Unweighted edges are never negatively weighted.
        return weight in edge_data and edge_data[weight] < 0

    return any(weight in d and d[weight] < 0 for _, _, d in G.edges(data=True))
|
| 1167 |
+
|
| 1168 |
+
|
| 1169 |
+
def is_empty(G):
    """Returns True if `G` has no edges.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    Returns
    -------
    bool
        True if `G` has no edges, and False otherwise.

    Notes
    -----
    An empty graph can have nodes but not edges. The empty graph with zero
    nodes is known as the null graph. This is an $O(n)$ operation where n
    is the number of nodes in the graph.

    """
    # A single non-empty adjacency dict means at least one edge exists.
    return all(not nbrs for nbrs in G._adj.values())
|
| 1190 |
+
|
| 1191 |
+
|
| 1192 |
+
def nodes_with_selfloops(G):
    """Returns an iterator over nodes with self loops.

    A node with a self loop has an edge with both ends adjacent
    to that node.

    Returns
    -------
    nodelist : iterator
        A iterator over nodes with self loops.

    See Also
    --------
    selfloop_edges, number_of_selfloops

    Examples
    --------
    >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_edge(1, 1)
    >>> G.add_edge(1, 2)
    >>> list(nx.nodes_with_selfloops(G))
    [1]

    """
    # A self-loop shows up as the node appearing in its own adjacency dict.
    return (node for node, neighbors in G._adj.items() if node in neighbors)
|
| 1217 |
+
|
| 1218 |
+
|
| 1219 |
+
def selfloop_edges(G, data=False, keys=False, default=None):
    """Returns an iterator over selfloop edges.

    A selfloop edge has the same node at both ends.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    data : string or bool, optional (default=False)
        Return selfloop edges as two tuples (u, v) (data=False)
        or three-tuples (u, v, datadict) (data=True)
        or three-tuples (u, v, datavalue) (data='attrname')
    keys : bool, optional (default=False)
        If True, return edge keys with each edge.
    default : value, optional (default=None)
        Value used for edges that don't have the requested attribute.
        Only relevant if data is not True or False.

    Returns
    -------
    edgeiter : iterator over edge tuples
        An iterator over all selfloop edges.

    See Also
    --------
    nodes_with_selfloops, number_of_selfloops

    Examples
    --------
    >>> G = nx.MultiGraph()  # or Graph, DiGraph, MultiDiGraph, etc
    >>> ekey = G.add_edge(1, 1)
    >>> ekey = G.add_edge(1, 2)
    >>> list(nx.selfloop_edges(G))
    [(1, 1)]
    >>> list(nx.selfloop_edges(G, data=True))
    [(1, 1, {})]
    >>> list(nx.selfloop_edges(G, keys=True))
    [(1, 1, 0)]
    >>> list(nx.selfloop_edges(G, keys=True, data=True))
    [(1, 1, 0, {})]
    """
    # Identity checks (`is True` / `is not False`) distinguish the bool True
    # from a string attribute name passed as `data`.
    if data is True:
        # Report the full edge-data dict with each self-loop.
        if G.is_multigraph():
            if keys is True:
                return (
                    (n, n, k, d)
                    for n, nbrs in G._adj.items()
                    if n in nbrs
                    for k, d in nbrs[n].items()
                )
            else:
                return (
                    (n, n, d)
                    for n, nbrs in G._adj.items()
                    if n in nbrs
                    for d in nbrs[n].values()
                )
        else:
            return ((n, n, nbrs[n]) for n, nbrs in G._adj.items() if n in nbrs)
    elif data is not False:
        # `data` is an attribute name: report its value, or `default` when
        # the edge does not carry that attribute.
        if G.is_multigraph():
            if keys is True:
                return (
                    (n, n, k, d.get(data, default))
                    for n, nbrs in G._adj.items()
                    if n in nbrs
                    for k, d in nbrs[n].items()
                )
            else:
                return (
                    (n, n, d.get(data, default))
                    for n, nbrs in G._adj.items()
                    if n in nbrs
                    for d in nbrs[n].values()
                )
        else:
            return (
                (n, n, nbrs[n].get(data, default))
                for n, nbrs in G._adj.items()
                if n in nbrs
            )
    else:
        # data is False: plain (u, v) tuples, plus the key for multigraphs
        # when requested.
        if G.is_multigraph():
            if keys is True:
                return (
                    (n, n, k)
                    for n, nbrs in G._adj.items()
                    if n in nbrs
                    for k in nbrs[n]
                )
            else:
                return (
                    (n, n)
                    for n, nbrs in G._adj.items()
                    if n in nbrs
                    for i in range(len(nbrs[n]))  # for easy edge removal (#4068)
                )
        else:
            return ((n, n) for n, nbrs in G._adj.items() if n in nbrs)
|
| 1319 |
+
|
| 1320 |
+
|
| 1321 |
+
def number_of_selfloops(G):
    """Returns the number of selfloop edges.

    A selfloop edge has the same node at both ends.

    Returns
    -------
    nloops : int
        The number of selfloops.

    See Also
    --------
    nodes_with_selfloops, selfloop_edges

    Examples
    --------
    >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_edge(1, 1)
    >>> G.add_edge(1, 2)
    >>> nx.number_of_selfloops(G)
    1
    """
    # Count lazily; no need to materialize the edge list.
    count = 0
    for _ in nx.selfloop_edges(G):
        count += 1
    return count
|
| 1344 |
+
|
| 1345 |
+
|
| 1346 |
+
def is_path(G, path):
    """Returns whether or not the specified path exists.

    For it to return True, every node on the path must exist and
    each consecutive pair must be connected via one or more edges.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    path : list
        A list of nodes which defines the path to traverse

    Returns
    -------
    bool
        True if `path` is a valid path in `G`

    """
    adjacency = G._adj
    # KeyError: a node on the path is absent from G.
    # TypeError: `path` is not iterable / contains unhashable entries.
    try:
        for node, nbr in nx.utils.pairwise(path):
            if nbr not in adjacency[node]:
                return False
    except (KeyError, TypeError):
        return False
    return True
|
| 1370 |
+
|
| 1371 |
+
|
| 1372 |
+
def path_weight(G, path, weight):
    """Returns total cost associated with specified path and weight

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    path: list
        A list of node labels which defines the path to traverse

    weight: string
        A string indicating which edge attribute to use for path cost

    Returns
    -------
    cost: int or float
        An integer or a float representing the total cost with respect to the
        specified weight of the specified path

    Raises
    ------
    NetworkXNoPath
        If `path` is not a valid path in `G` (a node is missing or a
        consecutive pair of nodes is not connected).
    """
    multigraph = G.is_multigraph()
    cost = 0

    if not nx.is_path(G, path):
        raise nx.NetworkXNoPath("path does not exist")
    for node, nbr in nx.utils.pairwise(path):
        if multigraph:
            # Between two nodes of a multigraph, take the cheapest of the
            # parallel edges.
            cost += min(v[weight] for v in G._adj[node][nbr].values())
        else:
            cost += G._adj[node][nbr][weight]
    return cost
|
wemm/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_coreviews.cpython-310.pyc
ADDED
|
Binary file (13.4 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_digraph.cpython-310.pyc
ADDED
|
Binary file (13.2 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_graphviews.cpython-310.pyc
ADDED
|
Binary file (13.5 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_multidigraph.cpython-310.pyc
ADDED
|
Binary file (14.7 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/classes/tests/test_subgraphviews.py
ADDED
|
@@ -0,0 +1,362 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
from networkx.utils import edges_equal
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestSubGraphView:
    """Tests for ``nx.subgraph_view`` applied to an undirected path graph.

    Subclasses override the class attributes below to reuse these tests for
    directed and multi(-di)graph variants.
    """

    # Factory for the view under test and the edge-filter factories it uses.
    gview = staticmethod(nx.subgraph_view)
    graph = nx.Graph
    hide_edges_filter = staticmethod(nx.filters.hide_edges)
    show_edges_filter = staticmethod(nx.filters.show_edges)

    @classmethod
    def setup_class(cls):
        # Path graph 0-1-2-...-8; hiding nodes 4 and 5 removes these edges.
        cls.G = nx.path_graph(9, create_using=cls.graph())
        cls.hide_edges_w_hide_nodes = {(3, 4), (4, 5), (5, 6)}

    def test_hidden_nodes(self):
        # Node 111 is not in G; hiding it must be a harmless no-op.
        hide_nodes = [4, 5, 111]
        nodes_gone = nx.filters.hide_nodes(hide_nodes)
        gview = self.gview
        G = gview(self.G, filter_node=nodes_gone)
        assert self.G.nodes - G.nodes == {4, 5}
        assert self.G.edges - G.edges == self.hide_edges_w_hide_nodes
        if G.is_directed():
            assert list(G[3]) == []
            assert list(G[2]) == [3]
        else:
            assert list(G[3]) == [2]
            assert set(G[2]) == {1, 3}
        # Hidden and never-present nodes both raise KeyError on lookup.
        pytest.raises(KeyError, G.__getitem__, 4)
        pytest.raises(KeyError, G.__getitem__, 112)
        pytest.raises(KeyError, G.__getitem__, 111)
        assert G.degree(3) == (3 if G.is_multigraph() else 1)
        assert G.size() == (7 if G.is_multigraph() else 5)

    def test_hidden_edges(self):
        # (222, 223) is not an edge of G; hiding it must be a no-op.
        hide_edges = [(2, 3), (8, 7), (222, 223)]
        edges_gone = self.hide_edges_filter(hide_edges)
        gview = self.gview
        G = gview(self.G, filter_edge=edges_gone)
        assert self.G.nodes == G.nodes
        if G.is_directed():
            # Directed filter hides only (2, 3); (8, 7) is not a directed edge.
            assert self.G.edges - G.edges == {(2, 3)}
            assert list(G[2]) == []
            assert list(G.pred[3]) == []
            assert list(G.pred[2]) == [1]
            assert G.size() == 7
        else:
            assert self.G.edges - G.edges == {(2, 3), (7, 8)}
            assert list(G[2]) == [1]
            assert G.size() == 6
        assert list(G[3]) == [4]
        pytest.raises(KeyError, G.__getitem__, 221)
        pytest.raises(KeyError, G.__getitem__, 222)
        assert G.degree(3) == 1

    def test_shown_node(self):
        # show_nodes induces the subgraph on {2, 3}; 111 is absent from G.
        induced_subgraph = nx.filters.show_nodes([2, 3, 111])
        gview = self.gview
        G = gview(self.G, filter_node=induced_subgraph)
        assert set(G.nodes) == {2, 3}
        if G.is_directed():
            assert list(G[3]) == []
        else:
            assert list(G[3]) == [2]
        assert list(G[2]) == [3]
        pytest.raises(KeyError, G.__getitem__, 4)
        pytest.raises(KeyError, G.__getitem__, 112)
        pytest.raises(KeyError, G.__getitem__, 111)
        assert G.degree(3) == (3 if G.is_multigraph() else 1)
        assert G.size() == (3 if G.is_multigraph() else 1)

    def test_shown_edges(self):
        # Only the listed edges (that exist) survive in the view.
        show_edges = [(2, 3), (8, 7), (222, 223)]
        edge_subgraph = self.show_edges_filter(show_edges)
        G = self.gview(self.G, filter_edge=edge_subgraph)
        assert self.G.nodes == G.nodes
        if G.is_directed():
            assert G.edges == {(2, 3)}
            assert list(G[3]) == []
            assert list(G[2]) == [3]
            assert list(G.pred[3]) == [2]
            assert list(G.pred[2]) == []
            assert G.size() == 1
        else:
            assert G.edges == {(2, 3), (7, 8)}
            assert list(G[3]) == [2]
            assert list(G[2]) == [3]
            assert G.size() == 2
        pytest.raises(KeyError, G.__getitem__, 221)
        pytest.raises(KeyError, G.__getitem__, 222)
        assert G.degree(3) == 1
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class TestSubDiGraphView(TestSubGraphView):
    """Directed-graph variant of the subgraph-view tests.

    Inherits the generic tests and adds checks for predecessor views and
    in/out edge/degree accessors specific to ``DiGraph``.
    """

    gview = staticmethod(nx.subgraph_view)
    graph = nx.DiGraph
    hide_edges_filter = staticmethod(nx.filters.hide_diedges)
    show_edges_filter = staticmethod(nx.filters.show_diedges)
    hide_edges = [(2, 3), (8, 7), (222, 223)]
    # Edges removed by hiding (2, 3) plus those incident to hidden nodes 4, 5.
    excluded = {(2, 3), (3, 4), (4, 5), (5, 6)}

    def test_inoutedges(self):
        edges_gone = self.hide_edges_filter(self.hide_edges)
        hide_nodes = [4, 5, 111]
        nodes_gone = nx.filters.hide_nodes(hide_nodes)
        G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone)

        # Both edge views must reflect the same exclusions.
        assert self.G.in_edges - G.in_edges == self.excluded
        assert self.G.out_edges - G.out_edges == self.excluded

    def test_pred(self):
        edges_gone = self.hide_edges_filter(self.hide_edges)
        hide_nodes = [4, 5, 111]
        nodes_gone = nx.filters.hide_nodes(hide_nodes)
        G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone)

        assert list(G.pred[2]) == [1]
        # Node 6's predecessor (5) is hidden, so it has no predecessors.
        assert list(G.pred[6]) == []

    def test_inout_degree(self):
        edges_gone = self.hide_edges_filter(self.hide_edges)
        hide_nodes = [4, 5, 111]
        nodes_gone = nx.filters.hide_nodes(hide_nodes)
        G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone)

        # Edge (2, 3) is hidden, so node 2 keeps only its incoming edge.
        assert G.degree(2) == 1
        assert G.out_degree(2) == 0
        assert G.in_degree(2) == 1
        assert G.size() == 4
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# multigraph
|
| 135 |
+
# multigraph
class TestMultiGraphView(TestSubGraphView):
    """Multigraph variant: the path graph gains parallel edges (2, 3, 4)
    and (2, 3, 5), so edge filters must be keyed by (u, v, key)."""

    gview = staticmethod(nx.subgraph_view)
    graph = nx.MultiGraph
    hide_edges_filter = staticmethod(nx.filters.hide_multiedges)
    show_edges_filter = staticmethod(nx.filters.show_multiedges)

    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9, create_using=cls.graph())
        # Two extra parallel edges between 2 and 3 with explicit keys 4 and 5.
        multiedges = {(2, 3, 4), (2, 3, 5)}
        cls.G.add_edges_from(multiedges)
        cls.hide_edges_w_hide_nodes = {(3, 4, 0), (4, 5, 0), (5, 6, 0)}

    def test_hidden_edges(self):
        # Keyed edges (2, 3, 3) and (222, 223, 0) do not exist: no-ops.
        hide_edges = [(2, 3, 4), (2, 3, 3), (8, 7, 0), (222, 223, 0)]
        edges_gone = self.hide_edges_filter(hide_edges)
        G = self.gview(self.G, filter_edge=edges_gone)
        assert self.G.nodes == G.nodes
        if G.is_directed():
            assert self.G.edges - G.edges == {(2, 3, 4)}
            assert list(G[3]) == [4]
            assert list(G[2]) == [3]
            assert list(G.pred[3]) == [2]  # only one 2 but two edges
            assert list(G.pred[2]) == [1]
            assert G.size() == 9
        else:
            assert self.G.edges - G.edges == {(2, 3, 4), (7, 8, 0)}
            assert list(G[3]) == [2, 4]
            assert list(G[2]) == [1, 3]
            assert G.size() == 8
            assert G.degree(3) == 3
        pytest.raises(KeyError, G.__getitem__, 221)
        pytest.raises(KeyError, G.__getitem__, 222)

    def test_shown_edges(self):
        show_edges = [(2, 3, 4), (2, 3, 3), (8, 7, 0), (222, 223, 0)]
        edge_subgraph = self.show_edges_filter(show_edges)
        G = self.gview(self.G, filter_edge=edge_subgraph)
        assert self.G.nodes == G.nodes
        if G.is_directed():
            assert G.edges == {(2, 3, 4)}
            assert list(G[3]) == []
            assert list(G.pred[3]) == [2]
            assert list(G.pred[2]) == []
            assert G.size() == 1
        else:
            assert G.edges == {(2, 3, 4), (7, 8, 0)}
            assert G.size() == 2
            assert list(G[3]) == [2]
        assert G.degree(3) == 1
        assert list(G[2]) == [3]
        pytest.raises(KeyError, G.__getitem__, 221)
        pytest.raises(KeyError, G.__getitem__, 222)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
# multidigraph
|
| 191 |
+
# multidigraph
class TestMultiDiGraphView(TestMultiGraphView, TestSubDiGraphView):
    """Directed multigraph variant, combining the multigraph and digraph
    test suites; overrides degree expectations for the extra parallel edges."""

    gview = staticmethod(nx.subgraph_view)
    graph = nx.MultiDiGraph
    hide_edges_filter = staticmethod(nx.filters.hide_multidiedges)
    show_edges_filter = staticmethod(nx.filters.show_multidiedges)
    hide_edges = [(2, 3, 0), (8, 7, 0), (222, 223, 0)]
    excluded = {(2, 3, 0), (3, 4, 0), (4, 5, 0), (5, 6, 0)}

    def test_inout_degree(self):
        edges_gone = self.hide_edges_filter(self.hide_edges)
        hide_nodes = [4, 5, 111]
        nodes_gone = nx.filters.hide_nodes(hide_nodes)
        G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone)

        # Key-0 edge (2, 3) is hidden, but the parallel key-4/key-5 edges remain.
        assert G.degree(2) == 3
        assert G.out_degree(2) == 2
        assert G.in_degree(2) == 1
        assert G.size() == 6
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
# induced_subgraph
|
| 212 |
+
# induced_subgraph
class TestInducedSubGraph:
    """Tests for ``nx.induced_subgraph``: the view shares node/edge attribute
    dictionaries with the original graph."""

    @classmethod
    def setup_class(cls):
        cls.K3 = G = nx.complete_graph(3)
        G.graph["foo"] = []
        G.nodes[0]["foo"] = []
        # Re-add edge (1, 2) so both directions share the same attr dict `ll`.
        G.remove_edge(1, 2)
        ll = []
        G.add_edge(1, 2, foo=ll)
        G.add_edge(2, 1, foo=ll)

    def test_full_graph(self):
        # Inducing on a superset of the nodes yields a view equal to G.
        G = self.K3
        H = nx.induced_subgraph(G, [0, 1, 2, 5])
        assert H.name == G.name
        self.graphs_equal(H, G)
        self.same_attrdict(H, G)

    def test_partial_subgraph(self):
        G = self.K3
        # A single (non-iterable) node argument induces on that one node.
        H = nx.induced_subgraph(G, 0)
        assert dict(H.adj) == {0: {}}
        assert dict(G.adj) != {0: {}}

        H = nx.induced_subgraph(G, [0, 1])
        assert dict(H.adj) == {0: {1: {}}, 1: {0: {}}}

    def same_attrdict(self, H, G):
        # Mutating attrs through H must be visible through G (shared dicts);
        # each change is reverted so other tests see the original data.
        old_foo = H[1][2]["foo"]
        H.edges[1, 2]["foo"] = "baz"
        assert G.edges == H.edges
        H.edges[1, 2]["foo"] = old_foo
        assert G.edges == H.edges
        old_foo = H.nodes[0]["foo"]
        H.nodes[0]["foo"] = "baz"
        assert G.nodes == H.nodes
        H.nodes[0]["foo"] = old_foo
        assert G.nodes == H.nodes

    def graphs_equal(self, H, G):
        # Compare internal structure directly, then check that undirected
        # graphs share one attr dict per edge across both directions.
        assert G._adj == H._adj
        assert G._node == H._node
        assert G.graph == H.graph
        assert G.name == H.name
        if not G.is_directed() and not H.is_directed():
            assert H._adj[1][2] is H._adj[2][1]
            assert G._adj[1][2] is G._adj[2][1]
        else:  # at least one is directed
            # NOTE(review): this aliases _pred/_succ onto the undirected graph
            # in place so the directed comparison below can run uniformly.
            if not G.is_directed():
                G._pred = G._adj
                G._succ = G._adj
            if not H.is_directed():
                H._pred = H._adj
                H._succ = H._adj
            assert G._pred == H._pred
            assert G._succ == H._succ
            assert H._succ[1][2] is H._pred[2][1]
            assert G._succ[1][2] is G._pred[2][1]
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
# edge_subgraph
|
| 273 |
+
# edge_subgraph
class TestEdgeSubGraph:
    """Tests for ``nx.edge_subgraph``: a read-only view induced by a set of
    edges, sharing attribute dictionaries with the original graph."""

    @classmethod
    def setup_class(cls):
        # Create a path graph on five nodes.
        cls.G = G = nx.path_graph(5)
        # Add some node, edge, and graph attributes.
        for i in range(5):
            G.nodes[i]["name"] = f"node{i}"
        G.edges[0, 1]["name"] = "edge01"
        G.edges[3, 4]["name"] = "edge34"
        G.graph["name"] = "graph"
        # Get the subgraph induced by the first and last edges.
        cls.H = nx.edge_subgraph(G, [(0, 1), (3, 4)])

    def test_correct_nodes(self):
        """Tests that the subgraph has the correct nodes."""
        assert [(0, "node0"), (1, "node1"), (3, "node3"), (4, "node4")] == sorted(
            self.H.nodes.data("name")
        )

    def test_correct_edges(self):
        """Tests that the subgraph has the correct edges."""
        assert edges_equal(
            [(0, 1, "edge01"), (3, 4, "edge34")], self.H.edges.data("name")
        )

    def test_add_node(self):
        """Tests that adding a node to the original graph does not
        affect the nodes of the subgraph.

        """
        self.G.add_node(5)
        assert [0, 1, 3, 4] == sorted(self.H.nodes)
        self.G.remove_node(5)

    def test_remove_node(self):
        """Tests that removing a node in the original graph
        removes the nodes of the subgraph.

        """
        self.G.remove_node(0)
        assert [1, 3, 4] == sorted(self.H.nodes)
        # Restore the node/edge removed above so other tests are unaffected.
        self.G.add_node(0, name="node0")
        self.G.add_edge(0, 1, name="edge01")

    def test_node_attr_dict(self):
        """Tests that the node attribute dictionary of the two graphs is
        the same object.

        """
        for v in self.H:
            assert self.G.nodes[v] == self.H.nodes[v]
        # Making a change to G should make a change in H and vice versa.
        self.G.nodes[0]["name"] = "foo"
        assert self.G.nodes[0] == self.H.nodes[0]
        self.H.nodes[1]["name"] = "bar"
        assert self.G.nodes[1] == self.H.nodes[1]
        # Revert the change, so tests pass with pytest-randomly
        self.G.nodes[0]["name"] = "node0"
        self.H.nodes[1]["name"] = "node1"

    def test_edge_attr_dict(self):
        """Tests that the edge attribute dictionary of the two graphs is
        the same object.

        """
        for u, v in self.H.edges():
            assert self.G.edges[u, v] == self.H.edges[u, v]
        # Making a change to G should make a change in H and vice versa.
        self.G.edges[0, 1]["name"] = "foo"
        assert self.G.edges[0, 1]["name"] == self.H.edges[0, 1]["name"]
        self.H.edges[3, 4]["name"] = "bar"
        assert self.G.edges[3, 4]["name"] == self.H.edges[3, 4]["name"]
        # Revert the change, so tests pass with pytest-randomly
        self.G.edges[0, 1]["name"] = "edge01"
        self.H.edges[3, 4]["name"] = "edge34"

    def test_graph_attr_dict(self):
        """Tests that the graph attribute dictionary of the two graphs
        is the same object.

        """
        assert self.G.graph is self.H.graph

    def test_readonly(self):
        """Tests that the subgraph cannot change the graph structure"""
        pytest.raises(nx.NetworkXError, self.H.add_node, 5)
        pytest.raises(nx.NetworkXError, self.H.remove_node, 0)
        pytest.raises(nx.NetworkXError, self.H.add_edge, 5, 6)
        pytest.raises(nx.NetworkXError, self.H.remove_edge, 0, 1)
|
wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (707 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/graph6.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/graphml.cpython-310.pyc
ADDED
|
Binary file (28.5 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/leda.cpython-310.pyc
ADDED
|
Binary file (2.87 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/p2g.cpython-310.pyc
ADDED
|
Binary file (3.07 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/readwrite/__pycache__/text.cpython-310.pyc
ADDED
|
Binary file (20.3 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (861 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/networkx/readwrite/json_graph/cytoscape.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import networkx as nx
|
| 2 |
+
|
| 3 |
+
__all__ = ["cytoscape_data", "cytoscape_graph"]
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def cytoscape_data(G, name="name", ident="id"):
    """Returns data in Cytoscape JSON format (cyjs).

    Parameters
    ----------
    G : NetworkX Graph
        The graph to convert to cytoscape format
    name : string
        A string which is mapped to the 'name' node element in cyjs format.
        Must not have the same value as `ident`.
    ident : string
        A string which is mapped to the 'id' node element in cyjs format.
        Must not have the same value as `name`.

    Returns
    -------
    data: dict
        A dictionary with cyjs formatted data.

    Raises
    ------
    NetworkXError
        If the values for `name` and `ident` are identical.

    See Also
    --------
    cytoscape_graph: convert a dictionary in cyjs format to a graph

    References
    ----------
    .. [1] Cytoscape user's manual:
       http://manual.cytoscape.org/en/stable/index.html

    Examples
    --------
    >>> G = nx.path_graph(2)
    >>> nx.cytoscape_data(G)  # doctest: +SKIP
    {'data': [],
     'directed': False,
     'multigraph': False,
     'elements': {'nodes': [{'data': {'id': '0', 'value': 0, 'name': '0'}},
       {'data': {'id': '1', 'value': 1, 'name': '1'}}],
      'edges': [{'data': {'source': 0, 'target': 1}}]}}
    """
    if name == ident:
        raise nx.NetworkXError("name and ident must be different.")

    # Build the container first; the node/edge lists are filled in below.
    node_list = []
    edge_list = []
    jsondata = {
        "data": list(G.graph.items()),
        "directed": G.is_directed(),
        "multigraph": G.is_multigraph(),
        "elements": {"nodes": node_list, "edges": edge_list},
    }

    for node, attrs in G.nodes.items():
        payload = attrs.copy()
        # Fall back to the stringified node when the attribute is absent/falsy.
        payload["id"] = attrs.get(ident) or str(node)
        payload["value"] = node
        payload["name"] = attrs.get(name) or str(node)
        node_list.append({"data": payload})

    if G.is_multigraph():
        # Multigraph edges carry their key so parallel edges round-trip.
        for u, v, k in G.edges(keys=True):
            payload = G.adj[u][v][k].copy()
            payload["source"] = u
            payload["target"] = v
            payload["key"] = k
            edge_list.append({"data": payload})
    else:
        for u, v in G.edges():
            payload = G.adj[u][v].copy()
            payload["source"] = u
            payload["target"] = v
            edge_list.append({"data": payload})
    return jsondata
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@nx._dispatchable(graphs=None, returns_graph=True)
def cytoscape_graph(data, name="name", ident="id"):
    """
    Create a NetworkX graph from a dictionary in cytoscape JSON format.

    Parameters
    ----------
    data : dict
        A dictionary of data conforming to cytoscape JSON format.
    name : string
        A string which is mapped to the 'name' node element in cyjs format.
        Must not have the same value as `ident`.
    ident : string
        A string which is mapped to the 'id' node element in cyjs format.
        Must not have the same value as `name`.

    Returns
    -------
    graph : a NetworkX graph instance
        The `graph` can be an instance of `Graph`, `DiGraph`, `MultiGraph`, or
        `MultiDiGraph` depending on the input data.

    Raises
    ------
    NetworkXError
        If the `name` and `ident` attributes are identical.

    See Also
    --------
    cytoscape_data: convert a NetworkX graph to a dict in cyjs format

    References
    ----------
    .. [1] Cytoscape user's manual:
       http://manual.cytoscape.org/en/stable/index.html

    Examples
    --------
    >>> data_dict = {
    ...     "data": [],
    ...     "directed": False,
    ...     "multigraph": False,
    ...     "elements": {
    ...         "nodes": [
    ...             {"data": {"id": "0", "value": 0, "name": "0"}},
    ...             {"data": {"id": "1", "value": 1, "name": "1"}},
    ...         ],
    ...         "edges": [{"data": {"source": 0, "target": 1}}],
    ...     },
    ... }
    >>> G = nx.cytoscape_graph(data_dict)
    >>> G.name
    ''
    >>> G.nodes()
    NodeView((0, 1))
    >>> G.nodes(data=True)[0]
    {'id': '0', 'value': 0, 'name': '0'}
    >>> G.edges(data=True)
    EdgeDataView([(0, 1, {'source': 0, 'target': 1})])
    """
    if name == ident:
        raise nx.NetworkXError("name and ident must be different.")

    # Select the graph class from the cyjs flags.
    multigraph = data.get("multigraph")
    graph = nx.MultiGraph() if multigraph else nx.Graph()
    if data.get("directed"):
        graph = graph.to_directed()
    graph.graph = dict(data.get("data"))

    for element in data["elements"]["nodes"]:
        raw = element["data"]
        attrs = raw.copy()
        # The 'value' field holds the original node object.
        node = raw["value"]
        if raw.get(name):
            attrs[name] = raw.get(name)
        if raw.get(ident):
            attrs[ident] = raw.get(ident)
        graph.add_node(node)
        graph.nodes[node].update(attrs)

    for element in data["elements"]["edges"]:
        raw = element["data"]
        attrs = raw.copy()
        source = raw["source"]
        target = raw["target"]
        if multigraph:
            # Restore the edge key (default 0) so parallel edges round-trip.
            key = raw.get("key", 0)
            graph.add_edge(source, target, key=key)
            graph.edges[source, target, key].update(attrs)
        else:
            graph.add_edge(source, target)
            graph.edges[source, target].update(attrs)
    return graph
|
wemm/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (174 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/nvidia_cublas_cu11-11.10.3.66.dist-info/REQUESTED
ADDED
|
File without changes
|
wemm/lib/python3.10/site-packages/simplejson-3.19.3.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: setuptools (72.1.0)
|
| 3 |
+
Root-Is-Purelib: false
|
| 4 |
+
Tag: cp310-cp310-manylinux_2_5_x86_64
|
| 5 |
+
Tag: cp310-cp310-manylinux1_x86_64
|
| 6 |
+
Tag: cp310-cp310-manylinux_2_17_x86_64
|
| 7 |
+
Tag: cp310-cp310-manylinux2014_x86_64
|
| 8 |
+
|
wemm/lib/python3.10/site-packages/simplejson-3.19.3.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
simplejson
|
wemm/lib/python3.10/site-packages/torchgen/__init__.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""torchgen
|
| 2 |
+
|
| 3 |
+
This module contains codegeneration utilities for PyTorch. It is used to
|
| 4 |
+
build PyTorch from source, but may also be used for out-of-tree projects
|
| 5 |
+
that extend PyTorch.
|
| 6 |
+
|
| 7 |
+
Note well that we provide no BC guarantees for torchgen. If you're interested
|
| 8 |
+
in using torchgen and want the PyTorch team to be aware, please reach out
|
| 9 |
+
on GitHub.
|
| 10 |
+
"""
|
wemm/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc
ADDED
|
Binary file (3.39 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc
ADDED
|
Binary file (64 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc
ADDED
|
Binary file (13.9 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/__pycache__/local.cpython-310.pyc
ADDED
|
Binary file (1.34 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/__pycache__/model.cpython-310.pyc
ADDED
|
Binary file (63.9 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/cpp.cpython-310.pyc
ADDED
|
Binary file (9.08 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/lazy.cpython-310.pyc
ADDED
|
Binary file (11.4 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/translate.cpython-310.pyc
ADDED
|
Binary file (7.38 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/api/functionalization.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Optional
|
| 2 |
+
|
| 3 |
+
from torchgen.api import dispatcher
|
| 4 |
+
from torchgen.api.types import (
|
| 5 |
+
BaseCType,
|
| 6 |
+
Binding,
|
| 7 |
+
boolT,
|
| 8 |
+
ConstRefCType,
|
| 9 |
+
CType,
|
| 10 |
+
longT,
|
| 11 |
+
NamedCType,
|
| 12 |
+
tensorT,
|
| 13 |
+
)
|
| 14 |
+
from torchgen.model import (
|
| 15 |
+
Argument,
|
| 16 |
+
BaseTy,
|
| 17 |
+
BaseType,
|
| 18 |
+
FunctionSchema,
|
| 19 |
+
NativeFunctionsViewGroup,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# This file describes the translation of JIT schema to API's used
|
| 24 |
+
# when creating view lambdas that are used by the functionalization pass.
|
| 25 |
+
# There are two types of lambdas: forward lambdas and reverse lambdas.
|
| 26 |
+
# These API's mostly follow the dispatcher API, with a few quirks:
|
| 27 |
+
# - The lambda capture has to convert reference types to value types
|
| 28 |
+
# - While the forward lambda just directly calls into the at::_ops API
|
| 29 |
+
# (following the dispatcher convention), the logic here for the reverse lambda
|
| 30 |
+
# is responsible for generating both the call-site, and the declarations
|
| 31 |
+
# (which are implemented manually in the at::functionalization::impl namespace).
|
| 32 |
+
|
| 33 |
+
# The lambdas generated for each view op in the functionalization pass are of the form
|
| 34 |
+
# [capture_arguments](outer_arguments) -> returns_type {
|
| 35 |
+
# return name(inner_arguments);
|
| 36 |
+
# }
|
| 37 |
+
|
| 38 |
+
# Define some specific lambda input arguments.
|
| 39 |
+
# Fixed lambda-argument bindings shared by all generated view lambdas.
# `base` is the original tensor the view was taken of.
base_binding = Binding(
    name="base",
    nctype=NamedCType(name="base", type=ConstRefCType(BaseCType(tensorT))),
    argument=Argument(
        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
    ),
    default=None,
)
# `mutated_view` is the updated view tensor fed to the reverse lambda.
# NOTE(review): the JIT-level Argument is named "base" here (and below) while
# the binding/NamedCType use the real name — presumably only the C++-level
# name matters for lambda generation; confirm before "fixing".
mutated_view_binding = Binding(
    name="mutated_view",
    nctype=NamedCType(name="mutated_view", type=ConstRefCType(BaseCType(tensorT))),
    argument=Argument(
        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
    ),
    default=None,
)
# Index of the mutated view for multi-output view ops.
mutated_view_idx_binding = Binding(
    name="mutated_view_idx",
    nctype=NamedCType(name="mutated_view_idx", type=BaseCType(longT)),
    argument=Argument(
        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
    ),
    default=None,
)
# Runtime flag choosing between view and view_copy behavior.
reapply_views_binding = Binding(
    name="reapply_views",
    nctype=NamedCType(name="reapply_views", type=BaseCType(boolT)),
    argument=Argument(
        name="reapply_views", type=BaseType(BaseTy.bool), default=None, annotation=None
    ),
    default=None,
)
|
| 71 |
+
|
| 72 |
+
# The lambda capture itself doesn't have a name.
|
| 73 |
+
# The name returned here corresponds to the name of the inner function called by the lambda.
|
| 74 |
+
def name(
    g: NativeFunctionsViewGroup,
    *,
    is_reverse: bool,
    include_namespace: bool,
    reapply_views: Optional[bool] = None,
) -> str:
    """Return the name of the inner function called by the generated lambda.

    (The lambda capture itself doesn't have a name.)

    For the reverse lambda this is the view-inverse function — always named
    after the "view_copy" variant, since the runtime ``reapply_views`` flag is
    plumbed into that single function instead of generating two inverses.
    For the forward lambda this is the ``at::_ops`` API call, choosing the
    view vs. view_copy overload based on ``reapply_views``.
    """
    if reapply_views is None:
        # reapply_views only matters for the fwd lambda; the reverse function
        # always receives the runtime "reapply_views" argument instead.
        assert is_reverse

    if is_reverse:
        assert g.view_copy is not None
        inverse_name = g.view_copy.func.name.unambiguous_name()
        # The reverse case codegens both call-sites (fully qualified) and
        # declarations (unqualified).
        if include_namespace:
            return f"at::functionalization::FunctionalInverses::{inverse_name}_inverse"
        return f"{inverse_name}_inverse"

    # Forward case: direct call into the at::_ops API, so the namespace is mandatory.
    assert include_namespace
    assert g.view_copy is not None
    chosen = g.view if reapply_views else g.view_copy
    return f"at::_ops::{chosen.func.name.unambiguous_name()}::call"
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def capture_arguments(func: FunctionSchema, *, is_reverse: bool) -> List[Binding]:
    """Bindings captured by the generated lambda: every argument except `self`.

    Captures must not hold C++ reference types (that would dangle), so any
    non-owning reference types (e.g. IntArrayRef) are converted to value
    types (e.g. vector<int64_t>).
    """
    schema_args = func.arguments.flat_all
    # View ops take the viewed tensor as their first argument.
    assert schema_args[0].type == BaseType(BaseTy.Tensor)
    value_bindings = [
        dispatcher.argument(a, remove_non_owning_ref_types=True)
        for a in schema_args[1:]
    ]
    return [reapply_views_binding, *value_bindings]
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def returns_type(func: FunctionSchema) -> CType:
    """Return type of the generated lambda: always a single Tensor.

    All view ops return tensor-like outputs; for multi-tensor outputs each
    tensor is tracked individually, so the lambda still returns one tensor.
    """
    assert len(func.returns) >= 1
    assert all(ret.type.is_tensor_like() for ret in func.returns)
    return BaseCType(tensorT)
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def outer_arguments(*, is_reverse: bool) -> List[Binding]:
    """Outer (declared) arguments of the lambda.

    Both directions take `base` and `mutated_view_idx`; the reverse lambda
    additionally takes the `mutated_view` tensor in the middle.
    """
    bindings = [base_binding]
    if is_reverse:
        bindings.append(mutated_view_binding)
    bindings.append(mutated_view_idx_binding)
    return bindings
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def inner_call_index(func: FunctionSchema) -> Optional[Binding]:
    """Return the index binding if this view op produces multiple tensors.

    For view ops that return multiple tensors (like `split`) a separate lambda
    is generated per output; replaying the op then needs an index to pick the
    right output. Returns None for single-tensor ops.
    """
    rets = func.returns
    multi_output = len(rets) > 1 or (len(rets) == 1 and rets[0].type.is_list_like())
    if multi_output:
        return mutated_view_idx_binding
    return None
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def inner_arguments(func: FunctionSchema, is_reverse: bool) -> List[Binding]:
    """Arguments passed to the inner function called by the lambda.

    The forward lambda calls the at::_ops API, while the reverse lambda calls
    the view-inverse API; both follow the dispatcher API, so non-self
    arguments use dispatcher bindings.

    The two reverse-branch return lists previously duplicated the shared
    prefix; they are now built from one prefix with the optional index
    appended.
    """
    args = func.arguments.flat_all
    # View ops take the viewed tensor as their first argument.
    assert args[0].type == BaseType(BaseTy.Tensor)
    non_self_bindings = [dispatcher.argument(a) for a in args[1:]]

    if not is_reverse:
        # The forward lambda swaps out the original tensor argument with the
        # lambda arg "base".
        return [base_binding] + non_self_bindings

    # The reverse lambda does the same, but with an additional "mutated_view"
    # arg. Calling convention: view ops that return multiple tensor outputs
    # have a view_inverse function that takes an additional index argument.
    prefix = [base_binding, mutated_view_binding, reapply_views_binding]
    index_binding = inner_call_index(func)
    if index_binding is not None:
        prefix.append(index_binding)
    return prefix + non_self_bindings
|
wemm/lib/python3.10/site-packages/torchgen/api/meta.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torchgen.model import NativeFunctionsGroup
|
| 2 |
+
|
| 3 |
+
# Follows dispatcher calling convention, but:
|
| 4 |
+
# - Mutable arguments not allowed. Meta functions are always
|
| 5 |
+
# written in functional form. Look at FunctionSchema.signature()
|
| 6 |
+
# - No tensor returns; instead we return a TensorMeta describing
|
| 7 |
+
# the tensor in question
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def name(g: NativeFunctionsGroup) -> str:
    """Meta function name: the functional variant's overload name, with '.'
    replaced by '_' so it is a valid C++ identifier."""
    functional_name = str(g.functional.func.name)
    return functional_name.replace(".", "_")
|
wemm/lib/python3.10/site-packages/torchgen/api/python.py
ADDED
|
@@ -0,0 +1,1476 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import Dict, List, Optional, Sequence, Set, Tuple, Union
|
| 3 |
+
|
| 4 |
+
from torchgen.api import cpp
|
| 5 |
+
|
| 6 |
+
from torchgen.api.types import Binding, CppSignature, CppSignatureGroup
|
| 7 |
+
from torchgen.gen import pythonify_default
|
| 8 |
+
from torchgen.model import (
|
| 9 |
+
Argument,
|
| 10 |
+
BaseTy,
|
| 11 |
+
BaseType,
|
| 12 |
+
FunctionSchema,
|
| 13 |
+
ListType,
|
| 14 |
+
NativeFunction,
|
| 15 |
+
OptionalType,
|
| 16 |
+
Return,
|
| 17 |
+
Type,
|
| 18 |
+
Variant,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 22 |
+
#
|
| 23 |
+
# Data Models
|
| 24 |
+
#
|
| 25 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 26 |
+
#
|
| 27 |
+
# [Notes] python binding codegen
|
| 28 |
+
#
|
| 29 |
+
# The Python binding codegen produces code that takes the input list of
|
| 30 |
+
# PyObjects, finds the matching ATen C++ function using PythonArgParser,
|
| 31 |
+
# converts the PyObjects into C++ types and calls the ATen C++ function:
|
| 32 |
+
#
|
| 33 |
+
# +--------+ parsing +------------------------+ binding +-----------------------+
|
| 34 |
+
# | PyObjs | ---------> | PythonArgParser Output | ---------> | Cpp Function Dispatch |
|
| 35 |
+
# +--------+ +------------------------+ +-----------------------+
|
| 36 |
+
#
|
| 37 |
+
# The following examples demonstrate the data models the Python binding
|
| 38 |
+
# codegen needs to deal with and the tasks it needs to accomplish. It
|
| 39 |
+
# helps understand the purpose of the new data types we introduced below.
|
| 40 |
+
#
|
| 41 |
+
# - Function Schema (source of truth)
|
| 42 |
+
#
|
| 43 |
+
# aten::empty.names(int[] size, *, Dimname[]? names,
|
| 44 |
+
# ScalarType? dtype=None, Layout? layout=None,
|
| 45 |
+
# Device? device=None, bool? pin_memory=None,
|
| 46 |
+
# MemoryFormat? memory_format=None) -> Tensor
|
| 47 |
+
#
|
| 48 |
+
# - Python Signature
|
| 49 |
+
#
|
| 50 |
+
# It's used to generate input schema string for PythonArgParser.
|
| 51 |
+
# Note: TensorOptions fields are reordered and the additional
|
| 52 |
+
# 'requires_grad' field is added:
|
| 53 |
+
#
|
| 54 |
+
# empty(IntArrayRef size, *, DimnameList? names,
|
| 55 |
+
# MemoryFormat? memory_format=None, ScalarType dtype=None,
|
| 56 |
+
# Layout layout=torch.strided, Device device=None,
|
| 57 |
+
# bool pin_memory=False, bool requires_grad=False)
|
| 58 |
+
#
|
| 59 |
+
# - C++ Signature
|
| 60 |
+
#
|
| 61 |
+
# It's used to generate C++ lambda formals & dispatch call.
|
| 62 |
+
# Note: the scattered TensorOptions fields are packed into 'options'.
|
| 63 |
+
#
|
| 64 |
+
# auto dispatch_empty =
|
| 65 |
+
# [](IntArrayRef size, c10::optional<DimnameList> names,
|
| 66 |
+
# const TensorOptions & options,
|
| 67 |
+
# c10::optional<MemoryFormat> memory_format) -> Tensor {
|
| 68 |
+
# pybind11::gil_scoped_release no_gil;
|
| 69 |
+
# return torch::empty(size, names, options, memory_format);
|
| 70 |
+
# };
|
| 71 |
+
#
|
| 72 |
+
# - Binding between Python Arguments and C++ Arguments
|
| 73 |
+
#
|
| 74 |
+
# Given a set of Python Arguments in scope, we need produce the
|
| 75 |
+
# binding expressions that translate the Python API into C++ API:
|
| 76 |
+
#
|
| 77 |
+
# Python Args Cpp Args Binding Exprs
|
| 78 |
+
# -----------------------------------------------------------------
|
| 79 |
+
# 0: size size '_r.intlist(0)'
|
| 80 |
+
# 1: names names 'names' [special init]
|
| 81 |
+
# 2: memory_format -------+
|
| 82 |
+
# 3: dtype -----+-|--> options 'options' [special packing]
|
| 83 |
+
# 4: layout / |
|
| 84 |
+
# 5: device / +--> memory_format '_r.memoryformatOptional(2)'
|
| 85 |
+
# 6: pin_memory /
|
| 86 |
+
# 7: requires_grad -+
|
| 87 |
+
#
|
| 88 |
+
# So the full dispatch expression would look like:
|
| 89 |
+
#
|
| 90 |
+
# dispatch_empty(_r.intlist(0), names, options,
|
| 91 |
+
# _r.memoryformatOptional(2))
|
| 92 |
+
#
|
| 93 |
+
# Where does 'names' come from? It involves special local init:
|
| 94 |
+
#
|
| 95 |
+
# auto __names = _r.toDimnameListOptional(1);
|
| 96 |
+
# c10::optional<DimnameList> names =
|
| 97 |
+
# __names ? c10::make_optional(DimnameList(__names.value()))
|
| 98 |
+
# : c10::nullopt;
|
| 99 |
+
#
|
| 100 |
+
# Where does 'options' come from? It involves special local init
|
| 101 |
+
# for TensorOptions. Note that Python side has the additional
|
| 102 |
+
# 'requires_grad' field:
|
| 103 |
+
#
|
| 104 |
+
# const auto options = TensorOptions()
|
| 105 |
+
# .dtype(_r.scalartype(3))
|
| 106 |
+
# .device(_r.device(5))
|
| 107 |
+
# .layout(_r.layoutOptional(4))
|
| 108 |
+
# .requires_grad(_r.toBool(7))
|
| 109 |
+
# .pinned_memory(_r.toBool(6));
|
| 110 |
+
#
|
| 111 |
+
# In some other cases one Python Argument can map to multiple C++
|
| 112 |
+
# Arguments. For example:
|
| 113 |
+
#
|
| 114 |
+
# aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False)
|
| 115 |
+
# -> (Tensor values, Tensor indices)
|
| 116 |
+
#
|
| 117 |
+
# Python Args Cpp Args Binding Exprs
|
| 118 |
+
# ---------------------------------------------------------------------
|
| 119 |
+
# +----> max 'out[0]'
|
| 120 |
+
# /-----> max_values 'out[1]
|
| 121 |
+
# 0: input / self '_r.tensor(0)'
|
| 122 |
+
# 1: dim / dim '_r.dimname(1)'
|
| 123 |
+
# 2: keepdim / keepdim '_r.toBool(2)'
|
| 124 |
+
# 3: out -----+ [local init] out '_r.tensorlist_n<2>(3)'
|
| 125 |
+
#
|
| 126 |
+
# As demonstrated above, the binding can involve reordering,
|
| 127 |
+
# packing, unpacking and special local inits.
|
| 128 |
+
#
|
| 129 |
+
#
|
| 130 |
+
# Let's look at a concrete example:
|
| 131 |
+
#
|
| 132 |
+
# static PythonArgParser parser({
|
| 133 |
+
# "abs(Tensor input, *, Tensor out=None)",
|
| 134 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 135 |
+
# ^
|
| 136 |
+
# +--- Python Schema, represented by PythonSignature and PythonArgument
|
| 137 |
+
#
|
| 138 |
+
# }, /*traceable=*/true);
|
| 139 |
+
#
|
| 140 |
+
# ParsedArgs<2> parsed_args;
|
| 141 |
+
# auto _r = parser.parse(nullptr, args, kwargs, parsed_args);
|
| 142 |
+
#
|
| 143 |
+
# ...
|
| 144 |
+
#
|
| 145 |
+
# if (_r.isNone(1)) {
|
| 146 |
+
# ~~~~~~~~~~~~ <--- Scattered PythonArgParser output (arg name = 'out')
|
| 147 |
+
# represented by PythonArgParserOutputExpr
|
| 148 |
+
#
|
| 149 |
+
# // aten::abs(Tensor self) -> Tensor
|
| 150 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 151 |
+
# ^
|
| 152 |
+
# +--- NativeFunction schema, base version
|
| 153 |
+
#
|
| 154 |
+
# auto dispatch_abs = [](const Tensor & self) -> Tensor {
|
| 155 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 156 |
+
# ^
|
| 157 |
+
# +--- dispatch_lambda_args / dispatch_lambda_return_str
|
| 158 |
+
# generated from NativeFunction / CppSignature
|
| 159 |
+
# (deprecated PythonSignature is special)
|
| 160 |
+
# arguments are represented by DispatchLambdaArgument
|
| 161 |
+
#
|
| 162 |
+
# pybind11::gil_scoped_release no_gil;
|
| 163 |
+
# return self.abs();
|
| 164 |
+
# ~~~~~~~~~~~ <--- cpp_dispatch_target / cpp_dispatch_exprs
|
| 165 |
+
# generated from NativeFunction / CppSignature
|
| 166 |
+
# };
|
| 167 |
+
# return wrap(dispatch_abs(_r.tensor(0)));
|
| 168 |
+
# ~~~~~~~~~~~~~
|
| 169 |
+
# ^
|
| 170 |
+
# +--- dispatch_lambda_exprs
|
| 171 |
+
# binding PythonArgParserOutputExpr (python args)
|
| 172 |
+
# and DispatchLambdaArgument (c++ args)
|
| 173 |
+
#
|
| 174 |
+
# } else {
|
| 175 |
+
# // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
| 176 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 177 |
+
# ^
|
| 178 |
+
# +--- NativeFunction schema, out-variant
|
| 179 |
+
#
|
| 180 |
+
# auto dispatch_abs_out = [](Tensor out, const Tensor & self) -> Tensor {
|
| 181 |
+
# pybind11::gil_scoped_release no_gil;
|
| 182 |
+
# return at::abs_out(out, self);
|
| 183 |
+
# };
|
| 184 |
+
# return wrap(dispatch_abs_out(_r.tensor(1), _r.tensor(0)));
|
| 185 |
+
# }
|
| 186 |
+
#
|
| 187 |
+
#
|
| 188 |
+
# [Notes] python interface codegen
|
| 189 |
+
# The python dataclasses below are used to generate both python binding code
|
| 190 |
+
# and pyi type hint signatures.
|
| 191 |
+
# In theory these two should look very similar, but there are a number of differences
|
| 192 |
+
# in how pyi signatures vs. python_arg_parser signatures are generated.
|
| 193 |
+
# These differences have been encapsulated in signature_str() vs. signature_str_pyi()
|
| 194 |
+
# to display the full signatures, and argument_str() vs argument_str_pyi() to display arguments.
|
| 195 |
+
# For example, only pyi signatures include return types.
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
@dataclass(frozen=True)
class PythonReturns:
    """Return types of a Python signature (only consumed by pyi generation)."""

    # Schema returns, in declaration order.
    returns: Tuple[Return, ...]
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
@dataclass(frozen=True)
class PythonArgument:
    """A single argument of a Python binding signature."""

    name: str
    type: Type
    default: Optional[str]

    # Used to generate the default init expr for some PythonArgParser outputs, e.g.:
    #
    #   _r.layoutWithDefault(3, layout_from_backend(self.options().backend())))
    #                           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #                           ^--- default_init str
    default_init: Optional[str]

    def argument_str(self, *, method: bool = False, symint: bool = True) -> str:
        """Render this argument's formal for Python argument parsing.

        Must stay consistent with torch/csrc/utils/python_arg_parser.h.
        """
        parser_type = (
            argument_type_str(self.type, symint=symint)
            .replace("const ", "")
            .replace(" &", "")
        )

        # s/self/input/ outside method bindings.
        # [old codegen] TODO: remove this? doesn't rename in codegen, it's just
        # for the parse string
        arg_name = self.name
        if not method and arg_name == "self" and parser_type in ("Tensor", "Number"):
            arg_name = "input"

        if self.default is None:
            return f"{parser_type} {arg_name}"
        rendered_default = {
            "nullptr": "None",
            "c10::nullopt": "None",
            "{}": "None",
        }.get(self.default, self.default)
        return f"{parser_type} {arg_name}={rendered_default}"

    def argument_str_pyi(
        self, *, method: bool = False, deprecated: bool = False
    ) -> str:
        """Render this argument's formal for .pyi type-hint signatures."""
        pyi_type = argument_type_str_pyi(self.type)

        # s/self/input/ outside method bindings.
        # [old codegen] TODO: remove this? doesn't rename in codegen, it's just
        # for the parse string
        arg_name = self.name
        if (
            arg_name == "self"
            and pyi_type == "Tensor"
            and not method
            and not deprecated
        ):
            arg_name = "input"

        if arg_name == "from":  # from is a Python keyword...
            arg_name += "_"

        # pyi merges the _out and functional variants into the same signature,
        # with an optional out arg.
        if arg_name == "out" and pyi_type == "Tensor" and not deprecated:
            pyi_type = "Optional[" + pyi_type + "]"

        # pyi deprecated signatures don't get defaults for their out arg.
        skip_default = (
            deprecated
            and isinstance(self, PythonOutArgument)
            and self.default == "None"
        )

        if self.default is None or skip_default:
            return f"{arg_name}: {pyi_type}"

        # int-list defaults written as C++ braces become Python tuples.
        if (
            isinstance(self.type, ListType)
            and self.type.elem == BaseType(BaseTy.int)
            and self.default.startswith("{")
            and self.default.endswith("}")
        ):
            rendered_default = "(" + self.default[1:-1] + ")"
        else:
            rendered_default = {
                "nullptr": "None",
                "c10::nullopt": "None",
                "{}": "None",
                "MemoryFormat::Contiguous": "contiguous_format",
                "QScheme::PER_TENSOR_AFFINE": "per_tensor_affine",
            }.get(self.default, self.default)
        return f"{arg_name}: {pyi_type}={rendered_default}"
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
@dataclass(frozen=True)
class PythonOutArgument(PythonArgument):
    """The packed 'out' argument of a Python signature.

    In the Python signature multiple output fields are packed into one 'out'
    argument. When binding to C++, it is first bound to a local 'out' variable:
        'auto out = _r.tensorlist_n<2>(2);',
    then bound to the scattered C++ output arguments as 'out[0]', 'out[1]', etc.
    TODO: maybe don't need keep scattered out fields for python signature?
    """

    outputs: Tuple[PythonArgument, ...]

    @staticmethod
    def from_outputs(
        outputs: Tuple[PythonArgument, ...]
    ) -> Optional["PythonOutArgument"]:
        """Pack scattered output arguments into a single out argument (or None)."""
        if not outputs:
            return None

        if len(outputs) == 1:
            only = outputs[0]
            return PythonOutArgument(
                name=only.name,
                type=only.type,
                default="None",
                default_init=None,
                outputs=outputs,
            )
        if len(outputs) > 1:
            if any(not out.type.is_tensor_like() for out in outputs):
                raise RuntimeError(f"Unsupported output type: {outputs}")
            return PythonOutArgument(
                name="out",
                # TODO: shouldn't this be OptionalType[ListType[...]], since it defaults to None?
                type=ListType(BaseType(BaseTy.Tensor), len(outputs)),
                default="None",
                default_init=None,
                outputs=outputs,
            )
        raise AssertionError(r"Unexpected PythonOutArgument size")
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
@dataclass(frozen=True)
|
| 332 |
+
class PythonSignature:
|
| 333 |
+
    """Model of one Python signature as seen by the python_arg_parser."""

    # Base operator name, without inplace/outplace suffix.
    name: str

    # Positional arguments.
    # TODO: create a dedicated SelfArgument type for 'self'?
    input_args: Tuple[PythonArgument, ...]

    # Keyword arguments excluding the 'out' argument and scattered kwargs belonging
    # to TensorOptions (dtype, layout, device, pin_memory, requires_grad, etc).
    input_kwargs: Tuple[PythonArgument, ...]

    # The packed 'out' argument, or None for functions with no outputs.
    output_args: Optional[PythonOutArgument]

    # Return types, which are only used by pyi
    returns: PythonReturns

    # These are scattered kwargs arguments belonging to TensorOptions.
    # When binding to C++, they are packed into a TensorOptions object 'options'.
    # It's possible that the C++ signature doesn't take TensorOptions object (e.g.
    # for out variant), in which case they will be used as scattered fields without
    # being packed into 'options'.
    # TODO: maybe create a PythonTensorOptionsArgument?
    tensor_options_args: Tuple[PythonArgument, ...]

    # method or function signature?
    method: bool
|
| 359 |
+
|
| 360 |
+
    @property
    def deprecated(self) -> bool:
        # Plain signatures are never deprecated; presumably a deprecated
        # subclass overrides this — confirm against the rest of the file.
        return False
|
| 363 |
+
|
| 364 |
+
def arguments(
|
| 365 |
+
self, *, skip_outputs: bool = False, skip_tensor_options: bool = False
|
| 366 |
+
) -> Tuple[Union[PythonArgument, PythonOutArgument], ...]:
|
| 367 |
+
result: List[Union[PythonArgument, PythonOutArgument]] = []
|
| 368 |
+
result.extend(self.input_args)
|
| 369 |
+
result.extend(self.input_kwargs)
|
| 370 |
+
if self.output_args is not None and not skip_outputs:
|
| 371 |
+
result.append(self.output_args)
|
| 372 |
+
if not skip_tensor_options:
|
| 373 |
+
result.extend(self.tensor_options_args)
|
| 374 |
+
return tuple(result)
|
| 375 |
+
|
| 376 |
+
    def arguments_count(self) -> int:
        # Total argument count, including outputs and TensorOptions kwargs.
        return len(self.arguments())
|
| 378 |
+
|
| 379 |
+
    def output_idx(self) -> int:
        # Index of the packed out argument in the parser output: it comes
        # right after all positional args and input kwargs.
        return len(self.input_args) + len(self.input_kwargs)
|
| 381 |
+
|
| 382 |
+
# [old codegen] Compute the Python function signature for argument parsing,
|
| 383 |
+
# as specified in torch/csrc/utils/python_arg_parser.h. WARNING:
|
| 384 |
+
# this is NOT the same type signature as specified by PEP 484
|
| 385 |
+
# as understood by mypy; our format was independently developed
|
| 386 |
+
# and has some quirks to make it more suitable specifically
|
| 387 |
+
# for error parsing.
|
| 388 |
+
#
|
| 389 |
+
# For a translation to mypy-valid type signatures, see
|
| 390 |
+
# signature_str_pyi().
|
| 391 |
+
def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
|
| 392 |
+
args = self.arguments(skip_outputs=skip_outputs)
|
| 393 |
+
schema_formals: List[str] = list(
|
| 394 |
+
map(lambda a: a.argument_str(method=self.method, symint=symint), args)
|
| 395 |
+
)
|
| 396 |
+
positional_argc = len(self.input_args)
|
| 397 |
+
if len(schema_formals) > positional_argc:
|
| 398 |
+
schema_formals.insert(positional_argc, "*")
|
| 399 |
+
|
| 400 |
+
return f'{self.name}({", ".join(schema_formals)})'
|
| 401 |
+
|
| 402 |
+
def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
|
| 403 |
+
args = self.arguments(skip_outputs=skip_outputs)
|
| 404 |
+
schema_formals: List[str] = list(
|
| 405 |
+
map(lambda a: a.argument_str_pyi(method=self.method), args)
|
| 406 |
+
)
|
| 407 |
+
positional_argc = len(self.input_args)
|
| 408 |
+
if len(schema_formals) > positional_argc:
|
| 409 |
+
schema_formals.insert(positional_argc, "*")
|
| 410 |
+
|
| 411 |
+
# only pyi signatures include returns
|
| 412 |
+
returns_str = returns_str_pyi(self)
|
| 413 |
+
# pyi also includes self (with no typing/defaults) for methods
|
| 414 |
+
if self.method:
|
| 415 |
+
schema_formals.insert(0, "self")
|
| 416 |
+
return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
|
| 417 |
+
|
| 418 |
+
def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
|
| 419 |
+
# only pyi uses vararg signatures
|
| 420 |
+
args = self.arguments(skip_outputs=skip_outputs)
|
| 421 |
+
schema_formals: List[str] = list(
|
| 422 |
+
map(lambda a: a.argument_str_pyi(method=self.method), args)
|
| 423 |
+
)
|
| 424 |
+
# vararg only applies to pyi signatures. vararg variants are not generated for all signatures
|
| 425 |
+
num_args = self.arguments_count()
|
| 426 |
+
num_positionalargs = len(self.input_args)
|
| 427 |
+
|
| 428 |
+
have_vararg_version = False
|
| 429 |
+
if num_args > 0:
|
| 430 |
+
vararg_type = args[0].type
|
| 431 |
+
if (
|
| 432 |
+
isinstance(vararg_type, ListType)
|
| 433 |
+
and str(vararg_type.elem) in ["int", "SymInt"]
|
| 434 |
+
and num_positionalargs == 1
|
| 435 |
+
):
|
| 436 |
+
have_vararg_version = True
|
| 437 |
+
|
| 438 |
+
if not have_vararg_version:
|
| 439 |
+
return None
|
| 440 |
+
# Below are the major changes in vararg vs. regular pyi signatures
|
| 441 |
+
# vararg signatures also omit the asterix
|
| 442 |
+
schema_formals[0] = "*" + args[0].name + ": _int"
|
| 443 |
+
|
| 444 |
+
returns_str = returns_str_pyi(self)
|
| 445 |
+
# pyi also includes self (with no typing/defaults) for methods
|
| 446 |
+
if self.method:
|
| 447 |
+
schema_formals.insert(0, "self")
|
| 448 |
+
return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
# The deprecated python signature involves some special logic, so create a
|
| 452 |
+
# dedicated data model to store these extra properties.
|
| 453 |
+
@dataclass(frozen=True)
|
| 454 |
+
class PythonSignatureDeprecated(PythonSignature):
|
| 455 |
+
# Schema for the deprecated function
|
| 456 |
+
deprecated_schema: FunctionSchema
|
| 457 |
+
|
| 458 |
+
# The deprecated signature might miss some arguments that the corresponding
|
| 459 |
+
# C++ signature expects. We need store the constant default values to pass in.
|
| 460 |
+
# For example:
|
| 461 |
+
# [deprecate signature]: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2)
|
| 462 |
+
# [func schema]: aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
|
| 463 |
+
# [func call]: self.addmm(mat1, mat2, beta, 1)
|
| 464 |
+
# We store ['self', 'mat1', 'mat2', 'beta', '1'] in this case.
|
| 465 |
+
deprecated_args_exprs: Tuple[str, ...]
|
| 466 |
+
|
| 467 |
+
@property
|
| 468 |
+
def deprecated(self) -> bool:
|
| 469 |
+
return True
|
| 470 |
+
|
| 471 |
+
def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
|
| 472 |
+
return (
|
| 473 |
+
PythonSignature.signature_str(
|
| 474 |
+
self, skip_outputs=skip_outputs, symint=symint
|
| 475 |
+
)
|
| 476 |
+
+ "|deprecated"
|
| 477 |
+
)
|
| 478 |
+
|
| 479 |
+
def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
|
| 480 |
+
args = self.arguments(skip_outputs=skip_outputs)
|
| 481 |
+
schema_formals: List[str] = list(
|
| 482 |
+
map(lambda a: a.argument_str_pyi(method=self.method, deprecated=True), args)
|
| 483 |
+
)
|
| 484 |
+
positional_argc = len(self.input_args)
|
| 485 |
+
if len(schema_formals) > positional_argc:
|
| 486 |
+
schema_formals.insert(positional_argc, "*")
|
| 487 |
+
|
| 488 |
+
returns_str = returns_str_pyi(self)
|
| 489 |
+
return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
|
| 490 |
+
|
| 491 |
+
def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
|
| 492 |
+
# the codegen doesn't include vararg variants for deprecated signatures
|
| 493 |
+
return None
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
# This struct is used to hold the PythonSignature and its corresponding
|
| 497 |
+
# NativeFunction BEFORE grouping base and out-variant functions.
|
| 498 |
+
# Why not store NativeFunction in PythonSignature or construct PythonSignature
|
| 499 |
+
# from NativeFunction? Because they are not 1-1 mapped.
|
| 500 |
+
# One native function could have both deprecated and non-deprecated python
|
| 501 |
+
# signatures - NativeFunction doesn't contain information to construct the
|
| 502 |
+
# deprecated python signature.
|
| 503 |
+
# One python signature is used to handle both the base and the out-variant
|
| 504 |
+
# function - see 'PythonSignatureGroup'.
|
| 505 |
+
@dataclass(frozen=True)
|
| 506 |
+
class PythonSignatureNativeFunctionPair:
|
| 507 |
+
signature: PythonSignature
|
| 508 |
+
function: NativeFunction
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
# We merge pairs of functions with signatures that are equivalent mod
|
| 512 |
+
# output arguments, and use a single entry in the python_arg_parser sig
|
| 513 |
+
# list for both (output arguments become optional).
|
| 514 |
+
@dataclass(frozen=True)
|
| 515 |
+
class PythonSignatureGroup:
|
| 516 |
+
# The signature used for Python argument parsing. The outplace signature
|
| 517 |
+
# is preferred if exists, because it can be used to parse inputs for both
|
| 518 |
+
# the out-place variant and the base version (with output omitted).
|
| 519 |
+
signature: PythonSignature
|
| 520 |
+
|
| 521 |
+
# The regular ATen declaration (e.g. conv2d)
|
| 522 |
+
base: NativeFunction
|
| 523 |
+
|
| 524 |
+
# The out variant (e.g. conv2d_out)
|
| 525 |
+
outplace: Optional[NativeFunction]
|
| 526 |
+
|
| 527 |
+
@classmethod
|
| 528 |
+
def from_pairs(
|
| 529 |
+
cls,
|
| 530 |
+
functional: PythonSignatureNativeFunctionPair,
|
| 531 |
+
out: Optional[PythonSignatureNativeFunctionPair],
|
| 532 |
+
) -> "PythonSignatureGroup":
|
| 533 |
+
if out is None:
|
| 534 |
+
return PythonSignatureGroup(
|
| 535 |
+
signature=functional.signature,
|
| 536 |
+
base=functional.function,
|
| 537 |
+
outplace=None,
|
| 538 |
+
)
|
| 539 |
+
|
| 540 |
+
# prefer the signature with optional out=... arguments because it's the
|
| 541 |
+
# superset that can be used to parse input for both base and outplace.
|
| 542 |
+
signature_kwargs = out.signature.__dict__.copy()
|
| 543 |
+
|
| 544 |
+
# Out overloads in C++ don't have TensorOptions arguments,
|
| 545 |
+
# so take these from the functional variant
|
| 546 |
+
signature_kwargs[
|
| 547 |
+
"tensor_options_args"
|
| 548 |
+
] = functional.signature.tensor_options_args
|
| 549 |
+
|
| 550 |
+
return PythonSignatureGroup(
|
| 551 |
+
signature=type(out.signature)(**signature_kwargs),
|
| 552 |
+
base=functional.function,
|
| 553 |
+
outplace=out.function,
|
| 554 |
+
)
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
# C++ function dispatch is wrapped in a lambda function. The lambda function
|
| 558 |
+
# has almost the same signature as the C++ function, only with some small
|
| 559 |
+
# variants - see details below.
|
| 560 |
+
# This data model is used to represent arguments of the lambda function
|
| 561 |
+
# signature.
|
| 562 |
+
@dataclass(frozen=True)
|
| 563 |
+
class DispatchLambdaArgument:
|
| 564 |
+
name: str
|
| 565 |
+
type_str: str
|
| 566 |
+
is_out_arg: bool
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
# To pass PyObjects arguments to C++ function (via the lambda wrapper),
|
| 570 |
+
# we need first convert PyObjects into simple C++ objects. This work
|
| 571 |
+
# is done by PythonArgParser.
|
| 572 |
+
# This data model is used to represent the output of PythonArgParser.
|
| 573 |
+
# It has 1-1 mapping with PythonArgument in PythonSignature.
|
| 574 |
+
@dataclass(frozen=True)
|
| 575 |
+
class PythonArgParserOutputExpr:
|
| 576 |
+
# argument name
|
| 577 |
+
name: str
|
| 578 |
+
|
| 579 |
+
# RHS expression to reference PythonArgParser output.
|
| 580 |
+
expr: str
|
| 581 |
+
|
| 582 |
+
# In some special cases we need create different expr, e.g.:
|
| 583 |
+
# '_r.isNone(1)' instead of '_r.tensor(1)'.
|
| 584 |
+
index: int
|
| 585 |
+
|
| 586 |
+
# The python argument it maps to.
|
| 587 |
+
argument: PythonArgument
|
| 588 |
+
|
| 589 |
+
@property
|
| 590 |
+
def is_none_expr(self) -> str:
|
| 591 |
+
return f"_r.isNone({self.index})"
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
# To pass PythonArgParser output to the lambda wrapper, we need bind
|
| 595 |
+
# PythonArgParserOutputExpr to DispatchLambdaArgument.
|
| 596 |
+
# They are not always 1-1 mapped, e.g. scattered TensorOptions fields
|
| 597 |
+
# need be packed into a TensorOptions object, which is the argument
|
| 598 |
+
# that the lambda function wrapper takes.
|
| 599 |
+
@dataclass(frozen=True)
|
| 600 |
+
class DispatchLambdaArgumentExprs:
|
| 601 |
+
# The exprs that provide the binding for lambda arguments, e.g.:
|
| 602 |
+
#
|
| 603 |
+
# 'self' -> '_r.tensor(0)'
|
| 604 |
+
# 'min' -> 'out[0]' / 'min_indices' -> 'out[1]'
|
| 605 |
+
# 'options' -> 'options'
|
| 606 |
+
#
|
| 607 |
+
# It has 1-1 mapping with DispatchLambdaArgument.
|
| 608 |
+
exprs: Sequence[str]
|
| 609 |
+
|
| 610 |
+
# Special local inits, which might introduce new variables that
|
| 611 |
+
# the 'exprs' above reference, e.g.:
|
| 612 |
+
#
|
| 613 |
+
# 'auto out = _r.tensorlist_n<2>(2);'
|
| 614 |
+
#
|
| 615 |
+
inits: Sequence[str]
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 619 |
+
#
|
| 620 |
+
# Helper Functions
|
| 621 |
+
#
|
| 622 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
def _cpp_signature(f: NativeFunction, *, method: bool = False) -> CppSignature:
|
| 626 |
+
return CppSignatureGroup.from_native_function(f, method=method).signature
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
def has_tensor_options(f: NativeFunction) -> bool:
|
| 630 |
+
return f.func.arguments.tensor_options is not None
|
| 631 |
+
|
| 632 |
+
|
| 633 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 634 |
+
#
|
| 635 |
+
# Python Signature
|
| 636 |
+
#
|
| 637 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 638 |
+
|
| 639 |
+
# 'simple_type' was introduced by the old codegen, which is slightly
|
| 640 |
+
# different from the python schema type, e.g.: doesn't have '?' suffix
|
| 641 |
+
# for optional Tensor/TensorList; doesn't have '[size]' suffix for list type.
|
| 642 |
+
def argument_type_str(
|
| 643 |
+
t: Type, *, simple_type: bool = False, symint: bool = True
|
| 644 |
+
) -> str:
|
| 645 |
+
if isinstance(t, BaseType):
|
| 646 |
+
if t.name == BaseTy.Tensor:
|
| 647 |
+
return "Tensor"
|
| 648 |
+
elif t.name == BaseTy.int:
|
| 649 |
+
return "int64_t"
|
| 650 |
+
elif t.name == BaseTy.float:
|
| 651 |
+
return "double"
|
| 652 |
+
elif t.name == BaseTy.str:
|
| 653 |
+
return "c10::string_view"
|
| 654 |
+
elif t.name in [
|
| 655 |
+
BaseTy.bool,
|
| 656 |
+
BaseTy.QScheme,
|
| 657 |
+
BaseTy.Scalar,
|
| 658 |
+
BaseTy.ScalarType,
|
| 659 |
+
BaseTy.Generator,
|
| 660 |
+
BaseTy.Storage,
|
| 661 |
+
BaseTy.Layout,
|
| 662 |
+
BaseTy.Device,
|
| 663 |
+
BaseTy.MemoryFormat,
|
| 664 |
+
BaseTy.Dimname,
|
| 665 |
+
BaseTy.Stream,
|
| 666 |
+
BaseTy.ConstQuantizerPtr,
|
| 667 |
+
BaseTy.SymInt,
|
| 668 |
+
]:
|
| 669 |
+
# These python schema type names line up with their function schema names
|
| 670 |
+
return t.name.name
|
| 671 |
+
|
| 672 |
+
elif isinstance(t, OptionalType):
|
| 673 |
+
if str(t.elem) == "Tensor":
|
| 674 |
+
# Is it desired to keep '?' for simple_type with new style dispatcher?
|
| 675 |
+
return "Tensor?"
|
| 676 |
+
elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
|
| 677 |
+
return f"{elem}?"
|
| 678 |
+
elif isinstance(t, ListType):
|
| 679 |
+
size = t.size if not simple_type else None
|
| 680 |
+
if str(t.elem) == "bool":
|
| 681 |
+
assert t.size is not None
|
| 682 |
+
return f"::std::array<bool,{t.size}>"
|
| 683 |
+
elif str(t.elem) == "int":
|
| 684 |
+
return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
|
| 685 |
+
elif str(t.elem) == "SymInt":
|
| 686 |
+
if symint:
|
| 687 |
+
return (
|
| 688 |
+
f"SymIntArrayRef[{size}]" if size is not None else "SymIntArrayRef"
|
| 689 |
+
)
|
| 690 |
+
else:
|
| 691 |
+
return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
|
| 692 |
+
elif str(t.elem) == "Tensor":
|
| 693 |
+
return f"TensorList[{size}]" if size is not None else "TensorList"
|
| 694 |
+
elif str(t.elem) == "Scalar":
|
| 695 |
+
return f"ScalarList[{size}]" if size is not None else "ScalarList"
|
| 696 |
+
elif str(t.elem) == "Tensor?":
|
| 697 |
+
if simple_type:
|
| 698 |
+
return "c10::List<c10::optional<Tensor>>"
|
| 699 |
+
else:
|
| 700 |
+
return "const c10::List<c10::optional<Tensor>> &"
|
| 701 |
+
elif str(t.elem) == "Dimname":
|
| 702 |
+
return f"DimnameList[{size}]" if size is not None else "DimnameList"
|
| 703 |
+
elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
|
| 704 |
+
return f"ArrayRef<{elem}>"
|
| 705 |
+
|
| 706 |
+
raise RuntimeError(f"unrecognized type {repr(t)}")
|
| 707 |
+
|
| 708 |
+
|
| 709 |
+
def argument_type_size(t: Type) -> Optional[int]:
|
| 710 |
+
l = t.is_list_like()
|
| 711 |
+
if l is not None and str(l.elem) != "bool":
|
| 712 |
+
return l.size
|
| 713 |
+
else:
|
| 714 |
+
return None
|
| 715 |
+
|
| 716 |
+
|
| 717 |
+
def argument(a: Argument) -> PythonArgument:
|
| 718 |
+
return PythonArgument(
|
| 719 |
+
name=a.name,
|
| 720 |
+
type=a.type,
|
| 721 |
+
# TODO: directly translate a.default to python default
|
| 722 |
+
default=str(
|
| 723 |
+
pythonify_default(cpp.default_expr(a.default, a.type, symint=False))
|
| 724 |
+
)
|
| 725 |
+
if a.default is not None
|
| 726 |
+
else None,
|
| 727 |
+
default_init=None,
|
| 728 |
+
)
|
| 729 |
+
|
| 730 |
+
|
| 731 |
+
# Generates a PythonSignature that can be used for either .pyi or PythonArgParser codegen
|
| 732 |
+
def signature(
|
| 733 |
+
f: NativeFunction, *, method: bool = False, pyi: bool = False
|
| 734 |
+
) -> PythonSignature:
|
| 735 |
+
return signature_from_schema(
|
| 736 |
+
f.func, category_override=f.category_override, method=method, pyi=pyi
|
| 737 |
+
)
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
def signature_from_schema(
|
| 741 |
+
func: FunctionSchema,
|
| 742 |
+
*,
|
| 743 |
+
category_override: Optional[str],
|
| 744 |
+
method: bool = False,
|
| 745 |
+
pyi: bool = False,
|
| 746 |
+
) -> PythonSignature:
|
| 747 |
+
args: List[Argument] = []
|
| 748 |
+
args.extend(func.arguments.pre_self_positional)
|
| 749 |
+
# Skip SelfArgument if this is method.
|
| 750 |
+
if not method and func.arguments.self_arg is not None:
|
| 751 |
+
args.append(func.arguments.self_arg.argument)
|
| 752 |
+
args.extend(func.arguments.post_self_positional)
|
| 753 |
+
args.extend(func.arguments.pre_tensor_options_kwarg_only)
|
| 754 |
+
# Skip TensorOptionsArguments. Python side TensorOptions
|
| 755 |
+
# arguments are created based on different rules - see below.
|
| 756 |
+
args.extend(func.arguments.post_tensor_options_kwarg_only)
|
| 757 |
+
args.extend(func.arguments.out)
|
| 758 |
+
|
| 759 |
+
input_arg_set = {a.name for a in func.arguments.flat_positional}
|
| 760 |
+
kwarg_only_set = {a.name for a in func.arguments.flat_kwarg_only}
|
| 761 |
+
out_arg_set = {a.name for a in func.arguments.out}
|
| 762 |
+
|
| 763 |
+
input_args = tuple(map(argument, filter(lambda a: a.name in input_arg_set, args)))
|
| 764 |
+
input_kwargs = tuple(
|
| 765 |
+
map(argument, filter(lambda a: a.name in kwarg_only_set, args))
|
| 766 |
+
)
|
| 767 |
+
outputs = tuple(map(argument, filter(lambda a: a.name in out_arg_set, args)))
|
| 768 |
+
|
| 769 |
+
# Reintroduce the scattered fields of TensorOptions for Python.
|
| 770 |
+
# Compared to the cpp counterpart, the python arguments have new property
|
| 771 |
+
# (default_init) and a new argument 'requires_grad', which require some
|
| 772 |
+
# special handlings.
|
| 773 |
+
# [old codegen] TODO: because these aren't guaranteed to be 100% faithful
|
| 774 |
+
# to the original versions in the yaml, this recreation is a potential
|
| 775 |
+
# source of drift between eager and JIT. Pull this logic out to a shared place.
|
| 776 |
+
|
| 777 |
+
has_tensor_input_arg = any(
|
| 778 |
+
a.type.is_tensor_like() for a in func.arguments.flat_non_out
|
| 779 |
+
)
|
| 780 |
+
if any(a.name == "requires_grad" for a in func.schema_order_arguments()):
|
| 781 |
+
raise ValueError(
|
| 782 |
+
"argument named requires_grad is reserved, should not explicitly add it in the schema"
|
| 783 |
+
)
|
| 784 |
+
|
| 785 |
+
# [old codegen] this probably won't work if one of the returns is not a tensor,
|
| 786 |
+
# but it will produce a compile-time error that is obvious.
|
| 787 |
+
has_tensor_return = any(r.type.is_tensor_like() for r in func.returns)
|
| 788 |
+
|
| 789 |
+
name: str = cpp.name(func)
|
| 790 |
+
is_factory_function = category_override == "factory" or (
|
| 791 |
+
has_tensor_return and not has_tensor_input_arg
|
| 792 |
+
)
|
| 793 |
+
is_like_or_new_function = (
|
| 794 |
+
category_override in ("new", "like")
|
| 795 |
+
or name.startswith("new_")
|
| 796 |
+
or name.endswith("_like")
|
| 797 |
+
)
|
| 798 |
+
|
| 799 |
+
tensor_options_args: List[PythonArgument] = []
|
| 800 |
+
if is_factory_function or is_like_or_new_function:
|
| 801 |
+
|
| 802 |
+
def topt_default_init(name: str) -> Optional[str]:
|
| 803 |
+
topt_args = func.arguments.tensor_options
|
| 804 |
+
if topt_args is None:
|
| 805 |
+
return None
|
| 806 |
+
a = getattr(topt_args, name)
|
| 807 |
+
if a.default is None or a.default == "None":
|
| 808 |
+
return None
|
| 809 |
+
return cpp.default_expr(a.default, a.type, symint=False)
|
| 810 |
+
|
| 811 |
+
tensor_options_args.append(
|
| 812 |
+
PythonArgument(
|
| 813 |
+
name="dtype",
|
| 814 |
+
type=OptionalType(BaseType(BaseTy.ScalarType)),
|
| 815 |
+
default="None",
|
| 816 |
+
default_init=(
|
| 817 |
+
"self.scalar_type()"
|
| 818 |
+
if is_like_or_new_function
|
| 819 |
+
else topt_default_init("dtype")
|
| 820 |
+
),
|
| 821 |
+
)
|
| 822 |
+
)
|
| 823 |
+
tensor_options_args.append(
|
| 824 |
+
PythonArgument(
|
| 825 |
+
name="layout",
|
| 826 |
+
type=OptionalType(BaseType(BaseTy.Layout)),
|
| 827 |
+
default="None",
|
| 828 |
+
default_init=(
|
| 829 |
+
"self.layout()"
|
| 830 |
+
if is_like_or_new_function
|
| 831 |
+
else topt_default_init("layout")
|
| 832 |
+
),
|
| 833 |
+
)
|
| 834 |
+
)
|
| 835 |
+
tensor_options_args.append(
|
| 836 |
+
PythonArgument(
|
| 837 |
+
name="device",
|
| 838 |
+
type=OptionalType(BaseType(BaseTy.Device)),
|
| 839 |
+
default="None",
|
| 840 |
+
default_init=(
|
| 841 |
+
"self.device()"
|
| 842 |
+
if is_like_or_new_function
|
| 843 |
+
else (
|
| 844 |
+
topt_default_init("device")
|
| 845 |
+
or "torch::tensors::get_default_device()"
|
| 846 |
+
)
|
| 847 |
+
),
|
| 848 |
+
)
|
| 849 |
+
)
|
| 850 |
+
tensor_options_args.append(
|
| 851 |
+
PythonArgument(
|
| 852 |
+
name="pin_memory",
|
| 853 |
+
type=OptionalType(BaseType(BaseTy.bool)),
|
| 854 |
+
default="False",
|
| 855 |
+
default_init=None,
|
| 856 |
+
)
|
| 857 |
+
)
|
| 858 |
+
tensor_options_args.append(
|
| 859 |
+
PythonArgument(
|
| 860 |
+
name="requires_grad",
|
| 861 |
+
type=OptionalType(BaseType(BaseTy.bool)),
|
| 862 |
+
default="False",
|
| 863 |
+
default_init=None,
|
| 864 |
+
)
|
| 865 |
+
)
|
| 866 |
+
|
| 867 |
+
returns = PythonReturns(returns=func.returns)
|
| 868 |
+
|
| 869 |
+
return PythonSignature(
|
| 870 |
+
name=str(func.name.name),
|
| 871 |
+
input_args=input_args,
|
| 872 |
+
input_kwargs=input_kwargs,
|
| 873 |
+
output_args=PythonOutArgument.from_outputs(outputs),
|
| 874 |
+
tensor_options_args=tuple(tensor_options_args),
|
| 875 |
+
returns=returns,
|
| 876 |
+
method=method,
|
| 877 |
+
)
|
| 878 |
+
|
| 879 |
+
|
| 880 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 881 |
+
#
|
| 882 |
+
# Python Interface
|
| 883 |
+
#
|
| 884 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
def namedtuple_fieldnames(returns: Tuple[Return, ...]) -> List[str]:
|
| 888 |
+
if len(returns) <= 1 or all(map(lambda r: r.name is None, returns)):
|
| 889 |
+
return []
|
| 890 |
+
else:
|
| 891 |
+
if any(map(lambda r: r.name is None, returns)):
|
| 892 |
+
# When building on Windows, `PyStructSequence_UnnamedField` could not be
|
| 893 |
+
# resolved by the linker for some reason, which cause error in building:
|
| 894 |
+
#
|
| 895 |
+
# python_nn_functions.cpp.obj : error LNK2001: unresolved external symbol
|
| 896 |
+
# PyStructSequence_UnnamedField
|
| 897 |
+
#
|
| 898 |
+
# Thus, at this point in time, we do not support unnamed
|
| 899 |
+
# fields in namedtuple; you must either name all fields,
|
| 900 |
+
# or none of them.
|
| 901 |
+
raise ValueError("Unnamed field is not supported by codegen")
|
| 902 |
+
|
| 903 |
+
return list(map(lambda r: str(r.name), returns))
|
| 904 |
+
|
| 905 |
+
|
| 906 |
+
def argument_type_str_pyi(t: Type) -> str:
|
| 907 |
+
add_optional = False
|
| 908 |
+
if isinstance(t, OptionalType):
|
| 909 |
+
t = t.elem
|
| 910 |
+
add_optional = True
|
| 911 |
+
|
| 912 |
+
if isinstance(t, BaseType):
|
| 913 |
+
if t.name == BaseTy.int:
|
| 914 |
+
ret = "_int"
|
| 915 |
+
if t.name == BaseTy.SymInt:
|
| 916 |
+
ret = "Union[_int, SymInt]"
|
| 917 |
+
elif t.name == BaseTy.float:
|
| 918 |
+
ret = "_float"
|
| 919 |
+
elif t.name == BaseTy.str:
|
| 920 |
+
ret = "str"
|
| 921 |
+
elif t.name == BaseTy.Scalar:
|
| 922 |
+
ret = "Number"
|
| 923 |
+
elif t.name == BaseTy.ScalarType:
|
| 924 |
+
ret = "_dtype"
|
| 925 |
+
elif t.name == BaseTy.bool:
|
| 926 |
+
ret = "_bool"
|
| 927 |
+
elif t.name == BaseTy.QScheme:
|
| 928 |
+
ret = "_qscheme"
|
| 929 |
+
elif t.name == BaseTy.Layout:
|
| 930 |
+
ret = "_layout"
|
| 931 |
+
elif t.name == BaseTy.Device:
|
| 932 |
+
ret = "Union[_device, str, None]"
|
| 933 |
+
elif t.name == BaseTy.MemoryFormat:
|
| 934 |
+
ret = "memory_format"
|
| 935 |
+
elif t.name == BaseTy.Dimname:
|
| 936 |
+
ret = "Union[str, ellipsis, None]"
|
| 937 |
+
elif t.name in [BaseTy.Tensor, BaseTy.Generator, BaseTy.Storage, BaseTy.Stream]:
|
| 938 |
+
# These python schema type names line up with their function schema names
|
| 939 |
+
ret = t.name.name
|
| 940 |
+
|
| 941 |
+
elif isinstance(t, ListType):
|
| 942 |
+
if str(t.elem) == "int":
|
| 943 |
+
ret = "Union[_int, _size]" if t.size is not None else "_size"
|
| 944 |
+
elif t.is_tensor_like():
|
| 945 |
+
# TODO: this doesn't seem right...
|
| 946 |
+
# Tensor?[] currently translates to Optional[Union[Tuple[Tensor, ...], List[Tensor]]]
|
| 947 |
+
# It should probably translate to Union[Tuple[Optional[Tensor], ...], List[Optional[Tensor]]]
|
| 948 |
+
if isinstance(t.elem, OptionalType):
|
| 949 |
+
add_optional = True
|
| 950 |
+
ret = (
|
| 951 |
+
"Union[Tensor, Tuple[Tensor, ...], List[Tensor]]"
|
| 952 |
+
if t.size is not None
|
| 953 |
+
else "Union[Tuple[Tensor, ...], List[Tensor]]"
|
| 954 |
+
)
|
| 955 |
+
elif str(t.elem) == "float":
|
| 956 |
+
ret = "Sequence[_float]"
|
| 957 |
+
else:
|
| 958 |
+
elem = argument_type_str_pyi(t.elem)
|
| 959 |
+
ret = f"Sequence[{elem}]"
|
| 960 |
+
|
| 961 |
+
else:
|
| 962 |
+
raise RuntimeError(f"unrecognized type {repr(t)}")
|
| 963 |
+
|
| 964 |
+
if add_optional:
|
| 965 |
+
ret = "Optional[" + ret + "]"
|
| 966 |
+
|
| 967 |
+
return ret
|
| 968 |
+
|
| 969 |
+
|
| 970 |
+
def return_type_str_pyi(t: Type) -> str:
|
| 971 |
+
# Where arguments are open to accepting Union, return types should return
|
| 972 |
+
# concrete types
|
| 973 |
+
|
| 974 |
+
if isinstance(t, OptionalType):
|
| 975 |
+
inner = return_type_str_pyi(t.elem)
|
| 976 |
+
return f"Optional[{inner}]"
|
| 977 |
+
|
| 978 |
+
if isinstance(t, BaseType):
|
| 979 |
+
if t.name == BaseTy.Device:
|
| 980 |
+
return "_device"
|
| 981 |
+
elif t.name == BaseTy.Dimname:
|
| 982 |
+
ret = "Optional[str]"
|
| 983 |
+
else:
|
| 984 |
+
return argument_type_str_pyi(t)
|
| 985 |
+
|
| 986 |
+
if isinstance(t, ListType):
|
| 987 |
+
inner = return_type_str_pyi(t.elem)
|
| 988 |
+
return f"List[{inner}]"
|
| 989 |
+
|
| 990 |
+
return argument_type_str_pyi(t)
|
| 991 |
+
|
| 992 |
+
|
| 993 |
+
def returns_named_tuple_pyi(signature: PythonSignature) -> Optional[Tuple[str, str]]:
|
| 994 |
+
python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
|
| 995 |
+
namedtuple_name = signature.name
|
| 996 |
+
field_names = namedtuple_fieldnames(signature.returns.returns)
|
| 997 |
+
if field_names:
|
| 998 |
+
tuple_args = [
|
| 999 |
+
f'("{name}", {typ})' for name, typ in zip(field_names, python_returns)
|
| 1000 |
+
]
|
| 1001 |
+
namedtuple_def = f'NamedTuple("{namedtuple_name}", [{", ".join(tuple_args)}])'
|
| 1002 |
+
return namedtuple_name, namedtuple_def
|
| 1003 |
+
return None
|
| 1004 |
+
|
| 1005 |
+
|
| 1006 |
+
def returns_str_pyi(signature: PythonSignature) -> str:
|
| 1007 |
+
field_names = namedtuple_fieldnames(signature.returns.returns)
|
| 1008 |
+
if field_names:
|
| 1009 |
+
return f"torch.return_types.{signature.name}"
|
| 1010 |
+
|
| 1011 |
+
python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
|
| 1012 |
+
if len(python_returns) > 1:
|
| 1013 |
+
return "Tuple[" + ", ".join(python_returns) + "]"
|
| 1014 |
+
if len(python_returns) == 1:
|
| 1015 |
+
return python_returns[0]
|
| 1016 |
+
return "None"
|
| 1017 |
+
|
| 1018 |
+
|
| 1019 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 1020 |
+
#
|
| 1021 |
+
# C++ Function Dispatch
|
| 1022 |
+
#
|
| 1023 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 1024 |
+
# This section provides APIs to generate the code that does C++ function
|
| 1025 |
+
# dispatch. The C++ function call is wrapped by a lambda function.
|
| 1026 |
+
# For example:
|
| 1027 |
+
#
|
| 1028 |
+
# // aten::selu_(Tensor(a!) self) -> Tensor(a!)
|
| 1029 |
+
# auto dispatch_selu_ = [](Tensor self) -> Tensor {
|
| 1030 |
+
# pybind11::gil_scoped_release no_gil;
|
| 1031 |
+
# return at::selu_(self);
|
| 1032 |
+
# };
|
| 1033 |
+
#
|
| 1034 |
+
# The lambda function's signature follows the C++ signature in common
|
| 1035 |
+
# cases, e.g.:
|
| 1036 |
+
#
|
| 1037 |
+
# // aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
|
| 1038 |
+
# [](const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
|
| 1039 |
+
#
|
| 1040 |
+
# For out variant the 'out' argument's type is changed from 'Tensor &'
|
| 1041 |
+
# to 'Tensor'. It's because when calling the lambda it passes in the
|
| 1042 |
+
# PythonArgParser output '_r.tensor(3)', which is stack allocated object
|
| 1043 |
+
# and needs to pass by value. Also see comments in 'dispatch_lambda_return_str()'.
|
| 1044 |
+
#
|
| 1045 |
+
# // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
|
| 1046 |
+
# [](Tensor out, const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
|
| 1047 |
+
#
|
| 1048 |
+
# For multi-output case it can keep using reference type because the
|
| 1049 |
+
# PythonArgParser output has been unpacked to local variables, e.g.:
|
| 1050 |
+
#
|
| 1051 |
+
# // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *,
|
| 1052 |
+
# // Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
|
| 1053 |
+
# [](Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim) -> std::tuple<Tensor,Tensor>
|
| 1054 |
+
#
|
| 1055 |
+
# For deprecated python signature, it should follow deprecated python arg order.
|
| 1056 |
+
# TODO: This is to keep same byte-for-byte result as the old codegen - maybe unnecessary?
|
| 1057 |
+
|
| 1058 |
+
|
| 1059 |
+
def dispatch_lambda_args(
|
| 1060 |
+
ps: PythonSignature, f: NativeFunction, symint: bool = True
|
| 1061 |
+
) -> Tuple[DispatchLambdaArgument, ...]:
|
| 1062 |
+
if isinstance(ps, PythonSignatureDeprecated):
|
| 1063 |
+
schema = ps.deprecated_schema
|
| 1064 |
+
else:
|
| 1065 |
+
schema = f.func
|
| 1066 |
+
|
| 1067 |
+
# Start with cpp arguments - dispatch lambda signature always include 'self'
|
| 1068 |
+
cpp_args = cpp.arguments(
|
| 1069 |
+
arguments=schema.arguments,
|
| 1070 |
+
faithful=False,
|
| 1071 |
+
symint=symint,
|
| 1072 |
+
method=False,
|
| 1073 |
+
cpp_no_default_args=f.cpp_no_default_args,
|
| 1074 |
+
)
|
| 1075 |
+
out_args: Set[str] = {a.name for a in schema.arguments.out}
|
| 1076 |
+
|
| 1077 |
+
# Convert from cpp argument to lambda argument
|
| 1078 |
+
def dispatch_lambda_arg(cpp_arg: Binding) -> DispatchLambdaArgument:
|
| 1079 |
+
type_str = cpp_arg.type
|
| 1080 |
+
is_out_arg = cpp_arg.name in out_args
|
| 1081 |
+
if ps.method and cpp_arg.name == "self":
|
| 1082 |
+
# For method's 'self', we can use 'const Tensor &' and simply ignore mutability!
|
| 1083 |
+
type_str = "const at::Tensor &"
|
| 1084 |
+
else:
|
| 1085 |
+
# For other cases we need prevent dangling refs to temps (unless it's
|
| 1086 |
+
# unpacked scattered output)
|
| 1087 |
+
# The reason is explained in the comments above and in 'dispatch_lambda_return_str()'.
|
| 1088 |
+
# TODO: avoid this special handling?
|
| 1089 |
+
ensure_temp_safe = len(out_args) <= 1 or not is_out_arg
|
| 1090 |
+
if ensure_temp_safe:
|
| 1091 |
+
type_str = {
|
| 1092 |
+
"at::Tensor &": "at::Tensor",
|
| 1093 |
+
}.get(type_str, type_str)
|
| 1094 |
+
return DispatchLambdaArgument(
|
| 1095 |
+
name=cpp_arg.name,
|
| 1096 |
+
type_str=type_str,
|
| 1097 |
+
is_out_arg=is_out_arg,
|
| 1098 |
+
)
|
| 1099 |
+
|
| 1100 |
+
return tuple(map(dispatch_lambda_arg, cpp_args))
|
| 1101 |
+
|
| 1102 |
+
|
| 1103 |
+
# [old codegen] XXX: if you got here because of an assertion failure, it doesn't mean
|
| 1104 |
+
# it's enough to just extend the list here. Before you do this, make sure
|
| 1105 |
+
# to add an appropriate wrap() overload in torch/csrc/autograd/utils/wrap_outputs.h.
|
| 1106 |
+
SUPPORTED_RETURN_TYPES = {
|
| 1107 |
+
"at::Tensor",
|
| 1108 |
+
"::std::tuple<at::Tensor,at::Tensor>",
|
| 1109 |
+
"::std::tuple<at::Tensor,at::Tensor,at::Tensor>",
|
| 1110 |
+
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
|
| 1111 |
+
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
|
| 1112 |
+
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
|
| 1113 |
+
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,int64_t>",
|
| 1114 |
+
"::std::tuple<at::Tensor,at::Tensor,double,int64_t>",
|
| 1115 |
+
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t>",
|
| 1116 |
+
"::std::tuple<at::Tensor,at::Tensor,double,at::Tensor,int64_t>",
|
| 1117 |
+
"::std::tuple<double,int64_t>",
|
| 1118 |
+
"::std::tuple<at::Tensor,::std::vector<at::Tensor>>",
|
| 1119 |
+
"::std::vector<at::Tensor>",
|
| 1120 |
+
# Needed for flash attention forw/backward
|
| 1121 |
+
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t,int64_t,int64_t,int64_t,at::Tensor>",
|
| 1122 |
+
"at::Scalar",
|
| 1123 |
+
"bool",
|
| 1124 |
+
"int64_t",
|
| 1125 |
+
"void*",
|
| 1126 |
+
"void",
|
| 1127 |
+
"at::QScheme",
|
| 1128 |
+
"double",
|
| 1129 |
+
"at::IntArrayRef",
|
| 1130 |
+
"at::ScalarType",
|
| 1131 |
+
}
|
| 1132 |
+
|
| 1133 |
+
|
| 1134 |
+
def dispatch_lambda_return_str(f: NativeFunction) -> str:
    """Return the C++ return type string for the dispatch lambda of `f`.

    Raises RuntimeError if the resulting type is not in SUPPORTED_RETURN_TYPES
    (i.e. there is no matching wrap() overload in wrap_outputs.h).
    """
    # [old codegen] Remove type annotation (e.g. 'Tensor' rather than 'Tensor &')
    # because the dispatch lambdas take mutable arguments *by value*, not
    # by reference. If you then return a reference to such an argument, you
    # will now have a pointer to a dangling stack entry. Not good.
    #
    # You want:
    #
    #   auto dispatch_selu_ = [](Tensor self) -> Tensor { ...; return at::selu_(self); };
    #                                            ^^^^^^
    #
    # *not*
    #
    #   auto dispatch_selu_ = [](Tensor self) -> Tensor& { ...; return at::selu_(self); };
    #                                            ^^^^^^^
    #
    # (NB: We can't make dispatch_selu_ take Tensor&, because the enclosing
    # codegen looks like dispatch_selu_(_r.tensor(0)), and you can't take a
    # mutable reference to temporary.  Maybe we could assign it to a
    # variable itself.)
    #
    # Strip annotations via a comprehension rather than map(lambda ...).
    returns_without_annotation = tuple(
        Return(r.name, r.type, None) for r in f.func.returns
    )
    return_str = cpp.returns_type(returns_without_annotation, symint=True).cpp_type()
    if return_str not in SUPPORTED_RETURN_TYPES:
        raise RuntimeError(f"{f.func.name} returns unsupported type {return_str}")
    return return_str
|
| 1161 |
+
|
| 1162 |
+
|
| 1163 |
+
def cpp_dispatch_target(f: NativeFunction) -> str:
    """Return the C++ callable expression the dispatch lambda should invoke.

    Method variants dispatch through `self.<name>`; function variants through
    either `torch::<name>` or `at::<name>`.  Raises RuntimeError when `f` is
    neither a function nor a method variant.
    """
    name = cpp.name(f.func, symint_overload=f.func.has_symint())
    if Variant.method in f.variants:
        return f"self.{name}"
    if Variant.function in f.variants:
        # Ops taking TensorOptions (and the *_like factories) are exposed in
        # the torch:: namespace; everything else goes through at::.
        uses_torch_ns = has_tensor_options(f) or f.func.name.name.base.endswith(
            "_like"
        )
        ns = "torch" if uses_torch_ns else "at"
        return f"{ns}::{name}"
    raise RuntimeError(f"could not dispatch, neither function nor method: {f.func}")
|
| 1175 |
+
|
| 1176 |
+
|
| 1177 |
+
def cpp_dispatch_exprs(
    f: NativeFunction,
    *,
    python_signature: Optional[PythonSignature] = None,
) -> Tuple[str, ...]:
    """Return the argument expressions used to call the C++ dispatch target.

    For regular signatures these are simply the C++ argument names; for
    deprecated python signatures the pre-baked `deprecated_args_exprs` are
    used (dropping 'out' when `f` is not an out-variant).  'self' is dropped
    for method variants since it is the receiver, not an argument.
    """
    cpp_args: Sequence[Binding] = _cpp_signature(f, method=False).arguments()

    exprs: Tuple[str, ...]
    if not isinstance(python_signature, PythonSignatureDeprecated):
        # By default the exprs are consistent with the C++ signature.
        exprs = tuple(a.name for a in cpp_args)
    else:
        # For deprecated python signature we may need fill in some constants.
        exprs = tuple(
            expr
            for expr in python_signature.deprecated_args_exprs
            if expr != "out" or f.func.is_out_fn()
        )

    if Variant.method in f.variants:
        exprs = tuple(expr for expr in exprs if expr != "self")

    return exprs
|
| 1201 |
+
|
| 1202 |
+
|
| 1203 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 1204 |
+
#
|
| 1205 |
+
# Python / C++ Args Binding
|
| 1206 |
+
#
|
| 1207 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 1208 |
+
|
| 1209 |
+
# We explicitly enumerate the PythonArgParser unpacking methods for all
|
| 1210 |
+
# supported types. This might be more verbose than necessary, partially
|
| 1211 |
+
# because of the irregularity of unpacking method naming, partially
|
| 1212 |
+
# because we want to mimic the old codegen behavior - to reject
|
| 1213 |
+
# unexpected and/or unsupported cases which the old codegen rejects.
|
| 1214 |
+
# For certain cases it is intentionally more restrictive than necessary,
|
| 1215 |
+
# e.g.: it doesn't accepts doublelist with definite size.
|
| 1216 |
+
def arg_parser_unpack_method(
    t: Type, default: Optional[str], default_init: Optional[str], *, symint: bool = True
) -> str:
    """Return the PythonArgParser unpack-method name for schema type `t`.

    `default_init` being non-None selects the *WithDefault variant of an
    unpack method; only a small closed set of types supports that.  With
    symint=False, SymInt-typed arguments fall back to plain int unpacking.
    Raises RuntimeError for any type the parser does not support.
    """
    has_default_init = default_init is not None
    # Only these types have a *WithDefault unpack method on PythonArgParser.
    if has_default_init and str(t) not in (
        "ScalarType?",
        "ScalarType",
        "Device",
        "Device?",
        "Layout",
        "Layout?",
        "bool",
        "bool?",
    ):
        raise RuntimeError(f"type '{t}' does not supported unpacking with default")

    if isinstance(t, BaseType):
        if t.name in [
            BaseTy.Tensor,
            BaseTy.Stream,
            BaseTy.Storage,
            BaseTy.Scalar,
            BaseTy.Dimname,
        ]:
            # These unpack methods line up with their schema names
            return t.name.name.lower()
        elif t.name == BaseTy.ScalarType:
            return "scalartypeWithDefault" if has_default_init else "scalartype"
        elif t.name == BaseTy.Device:
            return "deviceWithDefault" if has_default_init else "device"
        elif t.name == BaseTy.int:
            return "toInt64"
        elif t.name == BaseTy.SymInt:
            # SymInt degrades to a plain int64 unpack when symint support is off.
            if symint:
                return "toSymInt"
            else:
                return "toInt64"
        elif t.name == BaseTy.bool:
            return "toBoolWithDefault" if has_default_init else "toBool"
        elif t.name == BaseTy.float:
            return "toDouble"
        elif t.name == BaseTy.str:
            return "stringView"
        elif t.name == BaseTy.Layout:
            return "layoutWithDefault" if has_default_init else "layout"
        elif t.name == BaseTy.MemoryFormat:
            return "memoryformat"

    elif isinstance(t, OptionalType):
        if str(t.elem) == "Tensor":
            return "optionalTensor"
        elif str(t.elem) == "Generator":
            return "generator"
        elif str(t.elem) == "Dimname[]":
            return "toDimnameListOptional"
        elif not has_default_init and default in (None, "None", "c10::nullopt"):
            # If default is None: append 'Optional' to elem's unpacking method
            return (
                arg_parser_unpack_method(t.elem, None, None, symint=symint) + "Optional"
            )
        else:
            # Otherwise, load as underlying type with default
            return arg_parser_unpack_method(
                t.elem, default, default_init, symint=symint
            )

    elif isinstance(t, ListType):
        if str(t.elem) == "Tensor":
            # accept and use definite size
            if t.size is not None:
                return f"tensorlist_n<{t.size}>"
            else:
                return "tensorlist"
        elif str(t.elem) == "Tensor?":
            return "list_of_optional_tensors"
        elif str(t.elem) == "Dimname":
            # accept definite size
            return "dimnamelist"
        elif str(t.elem) == "int":
            # accept definite size
            return "intlist"
        elif str(t) == "float[]":
            return "doublelist"
        elif str(t.elem) == "SymInt":
            # accept definite size
            if symint:
                return "symintlist"
            else:
                return "intlist"
        elif str(t) == "Scalar[]":
            return "scalarlist"
    raise RuntimeError(f"type '{t}' is not supported by PythonArgParser")
|
| 1308 |
+
|
| 1309 |
+
|
| 1310 |
+
# Return RHS expression for python argument using PythonArgParser output.
# e.g. for arg name 'foo', arg type 'bool', arg_index = 2, returns '_r.toBool(2)'
def arg_parser_output_expr(
    arg_index: int, a: PythonArgument, *, symint: bool = True
) -> PythonArgParserOutputExpr:
    """Build the `_r.<method>(<index>[, <default>])` expression for one argument."""
    method = arg_parser_unpack_method(
        t=a.type, default=a.default, default_init=a.default_init, symint=symint
    )
    # A default_init value is passed as a second argument to the unpack call.
    if a.default_init is not None:
        call = f"_r.{method}({arg_index}, {a.default_init})"
    else:
        call = f"_r.{method}({arg_index})"

    return PythonArgParserOutputExpr(
        name=a.name,
        expr=call,
        index=arg_index,
        argument=a,
    )
|
| 1328 |
+
|
| 1329 |
+
|
| 1330 |
+
# Returns a map with key = arg_name and value = PythonArgParserOutputExpr.
def arg_parser_output_exprs(
    ps: PythonSignature, f: NativeFunction, *, symint: bool = True
) -> Dict[str, PythonArgParserOutputExpr]:
    """Map each python argument name to its parser-output expression, in order."""
    result: Dict[str, PythonArgParserOutputExpr] = {}
    for index, arg in enumerate(ps.arguments()):
        expr = arg_parser_output_expr(index, arg, symint=symint)
        result[expr.name] = expr
    return result
|
| 1339 |
+
|
| 1340 |
+
|
| 1341 |
+
# argument name to type for scattered tensor options fields
# (used by dispatch_lambda_exprs to validate ps.tensor_options_args)
TENSOR_OPTIONS_FIELDS = {
    "dtype": "ScalarType?",
    "device": "Device?",
    "layout": "Layout?",
    "pin_memory": "bool?",
    "requires_grad": "bool?",
}
|
| 1349 |
+
|
| 1350 |
+
# bind arg parser outputs (python args) with dispatch lambda arguments (c++ args).
|
| 1351 |
+
def dispatch_lambda_exprs(
    ps: PythonSignature, f: NativeFunction, *, symint: bool = True
) -> DispatchLambdaArgumentExprs:
    """Bind PythonArgParser outputs to dispatch lambda arguments.

    Produces the C++ init statements ('inits') and per-lambda-argument call
    expressions ('exprs') used by the generated python binding.  Special
    cases cover scattered TensorOptions packing, multi-output args, and
    optional Dimname lists.
    """
    # This method is to bind 'arg_parser_outputs' and 'lambda_args' by producing
    # 'inits' and 'lambda_args_exprs' for each lambda argument using arg parser
    # outputs.
    arg_parser_outputs = arg_parser_output_exprs(ps, f, symint=symint)
    lambda_args = dispatch_lambda_args(ps, f, symint=symint)
    inits: List[str] = []
    lambda_args_exprs: Dict[str, str] = {}

    has_toptions = has_tensor_options(f)

    # 1. special inits/unpacking to provide binding exprs for lambda arguments.
    for a in ps.arguments(skip_tensor_options=True):
        name = a.name
        arg_parser_expr = arg_parser_outputs[a.name].expr

        if has_toptions and name == "self":
            # TODO: why this needs to be special case?
            inits.extend(
                [
                    f"auto self = {arg_parser_expr};",
                ]
            )
            lambda_args_exprs[name] = name
        elif (
            isinstance(a, PythonOutArgument)
            and len(a.outputs) > 1
            and f.func.is_out_fn()
        ):
            # Multi-output out-variant: unpack the single parsed 'out' tuple
            # into one expression per output tensor.
            inits.extend(
                [
                    f"auto out = {arg_parser_expr};",
                ]
            )
            for i, out_arg in enumerate(a.outputs):
                lambda_args_exprs[out_arg.name] = f"out[{i}]"
        elif str(a.type) == "Dimname[]?":
            # [old codegen]
            # TODO: make this part of something more general, or get rid of it.
            # optional<ArrayRef<T>> are special. The PythonArgParser returns an
            # optional<vector<T>>, which cannot be implicitly converted to
            # optional<ArrayRef<T>>. One needs to unwrap the optional and rewrap.
            inits.extend(
                [
                    f"auto __{name} = {arg_parser_expr};",
                    f"c10::optional<DimnameList> {name} = __{name} ? c10::make_optional(DimnameList(__{name}.value())) : c10::nullopt;",  # noqa: B950
                ]
            )
            lambda_args_exprs[name] = name
        else:
            # default case - directly using PythonArgParser output expr
            lambda_args_exprs[name] = arg_parser_expr

    # method's self is passed directly to python binding, rather than parsed
    if ps.method:
        lambda_args_exprs["self"] = "self"

    # 2. special packing/checking for TensorOptions.
    tensor_options_args_names = list(map(lambda a: a.name, ps.tensor_options_args))
    if has_toptions:
        if f.func.is_out_fn():
            raise RuntimeError(f"{f.func}: tensor options with output arg")
        for a in ps.tensor_options_args:
            if a.name not in TENSOR_OPTIONS_FIELDS:
                raise RuntimeError(
                    f"{f.func}: unrecognized tensor options field '{a.name}' in python binding arguments"
                )
            if str(a.type) != TENSOR_OPTIONS_FIELDS.get(a.name):
                raise RuntimeError(
                    f"{f.func}: unrecognized type '{str(a.type)}' for tensor options field '{a.name}'"
                )
        if not all(
            map(lambda a: a in tensor_options_args_names, TENSOR_OPTIONS_FIELDS.keys())
        ):
            raise RuntimeError(
                f"{f.func}: incomplete tensor options args: {tensor_options_args_names}"
            )

        inits.append(
            f"""\
const auto options = TensorOptions()
    .dtype({arg_parser_outputs['dtype'].expr})
    .device({arg_parser_outputs['device'].expr})
    .layout({arg_parser_outputs['layout'].expr})
    .requires_grad({arg_parser_outputs['requires_grad'].expr})
    .pinned_memory({arg_parser_outputs['pin_memory'].expr});
torch::utils::maybe_initialize_cuda(options);
"""
        )
        lambda_args_exprs["options"] = "options"

    # 3. special case - access scattered TensorOptions fields without packing
    # TODO: maybe move to the generator side as it's not related to binding.
    if not has_toptions and tensor_options_args_names:
        if "dtype" in tensor_options_args_names:
            # we're an output-arg variant, check these args against output tensor
            if not f.func.is_out_fn():
                raise RuntimeError(
                    f"{f.func}: dtype in tensor_options_args without output arg"
                )
            if not all(
                map(lambda a: a in tensor_options_args_names, ("layout", "device"))
            ):
                raise RuntimeError(
                    f"{f.func}: incomplete tensor options for output check"
                )

            inits.append(
                f"""\
check_out_type_matches({arg_parser_outputs['out'].expr}, {arg_parser_outputs['dtype'].expr},
                       {arg_parser_outputs['dtype'].is_none_expr}, {arg_parser_outputs['layout'].expr},
                       {arg_parser_outputs['device'].expr}, {arg_parser_outputs['device'].is_none_expr});
"""
            )
        # we'll set requires_grad on outgoing tensor
        if "requires_grad" not in tensor_options_args_names:
            raise RuntimeError(
                f'{f.func}: expected "requires_grad" in tensor_options_args absent, but found [{tensor_options_args_names}]'
            )

    return DispatchLambdaArgumentExprs(
        exprs=tuple(map(lambda a: lambda_args_exprs[a.name], lambda_args)),
        inits=inits,
    )
|
wemm/lib/python3.10/site-packages/torchgen/api/ufunc.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
|
| 4 |
+
import torchgen.api.types as api_types
|
| 5 |
+
|
| 6 |
+
from torchgen.api import cpp, structured
|
| 7 |
+
from torchgen.api.types import (
|
| 8 |
+
ArgName,
|
| 9 |
+
BaseCppType,
|
| 10 |
+
BaseCType,
|
| 11 |
+
Binding,
|
| 12 |
+
ConstRefCType,
|
| 13 |
+
CType,
|
| 14 |
+
NamedCType,
|
| 15 |
+
scalarT,
|
| 16 |
+
)
|
| 17 |
+
from torchgen.model import (
|
| 18 |
+
Argument,
|
| 19 |
+
BaseTy,
|
| 20 |
+
BaseType,
|
| 21 |
+
DispatchKey,
|
| 22 |
+
FunctionSchema,
|
| 23 |
+
NativeFunctionsGroup,
|
| 24 |
+
Type,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def schema_kernel_name(func: FunctionSchema, dispatch_key: DispatchKey) -> str:
    """Name of the generated ufunc kernel for an out schema + dispatch key."""
    assert func.is_out_fn(), "ufunc.kernel_name should only be invoked on out schemas"
    return "ufunc_{}_{}".format(func.name.name, dispatch_key)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def kernel_name(g: NativeFunctionsGroup, dispatch_key: DispatchKey) -> str:
    """Kernel name for a functions group, keyed off its out variant's schema."""
    return schema_kernel_name(g.out.func, dispatch_key)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# Tensors are omitted (as they are stored in TensorIterator), everything else is
# passed along (technically, we can pass tensors along too, it just wastes
# argument registers)
#
# NB: used for CPU only
def dispatchstub_type(t: Type, *, binds: ArgName) -> Optional[NamedCType]:
    """Type of `t` as a DispatchStub argument, or None for Tensors (dropped)."""
    # Dispatch stubs are always plain ints
    r = cpp.valuetype_type(t, binds=binds, symint=False)
    if r is not None:
        return r

    if t == BaseType(BaseTy.Scalar):
        # Scalars travel by const reference.
        return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
    elif t == BaseType(BaseTy.Tensor):
        # Tensors are implicit in the TensorIterator, so no binding.
        return None
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def opmath_type(scalar_t: BaseCppType) -> BaseCppType:
    """Map the kernel scalar type to its opmath (higher-precision compute) type."""
    if scalar_t != api_types.scalar_t:
        raise NotImplementedError
    return api_types.opmath_t
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
# NB: Tensors in constructor are stored in opmath_t, not scalar_t
# because Tensor in constructor = its a scalar tensor partially applied =
# it can be higher precision and we want to compute in that higher precision
#
# NB: CUDA only
def ufunctor_ctor_type(t: Type, *, binds: ArgName, scalar_t: BaseCppType) -> NamedCType:
    """Type of `t` as a host-side ufunctor constructor argument."""
    r = cpp.valuetype_type(t, binds=binds, symint=False)
    if r is not None:
        return r

    if t == BaseType(BaseTy.Scalar):
        return NamedCType(binds, BaseCType(opmath_type(scalar_t)))
    elif t == BaseType(BaseTy.Tensor):
        # Stored in opmath_t, per the note above.
        return NamedCType(binds, BaseCType(opmath_type(scalar_t)))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
# Only Tensors ever get passed directly to operator()
#
# NB: CUDA only
# (Actually, this works for CPU too)
def ufunctor_apply_type(
    t: Type, *, binds: ArgName, scalar_t: BaseCppType
) -> NamedCType:
    """Type of `t` as a device-side operator() argument (Tensors only)."""
    if t != BaseType(BaseTy.Tensor):
        raise AssertionError(f"unrecognized type {repr(t)}")
    return NamedCType(binds, BaseCType(scalar_t))
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
# The actual ufunc template function the user writes. Everything here
# is done in the computation type. compute_t is opmath_t in CUDA and scalar_t
# in CPU
def ufunc_type(t: Type, *, binds: ArgName, compute_t: CType) -> NamedCType:
    """Type of a ufunc parameter; Scalar and Tensor both become compute_t."""
    r = cpp.valuetype_type(t, binds=binds, symint=False)
    if r is not None:
        return r

    # Scalar and Tensor arguments are materialized identically in compute_t.
    if t in (BaseType(BaseTy.Scalar), BaseType(BaseTy.Tensor)):
        return NamedCType(binds, compute_t)
    raise AssertionError(f"unrecognized type {repr(t)}")
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def ufunctor_ctor_argument(a: Argument, scalar_t: BaseCppType) -> Binding:
    """Binding for `a` as a host-side ufunctor constructor argument."""
    ctor_nctype = ufunctor_ctor_type(a.type, binds=a.name, scalar_t=scalar_t)
    return Binding(
        nctype=ctor_nctype,
        name=a.name,
        default=None,
        argument=a,
    )
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def ufunctor_apply_argument(a: Argument, scalar_t: BaseCppType) -> Binding:
    """Binding for `a` as a device-side ufunctor operator() argument."""
    apply_nctype = ufunctor_apply_type(a.type, binds=a.name, scalar_t=scalar_t)
    return Binding(
        nctype=apply_nctype,
        name=a.name,
        default=None,
        argument=a,
    )
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def ufunc_argument(a: Argument, compute_t: CType) -> Binding:
    """Binding for `a` as a parameter of the user-written ufunc template."""
    ufunc_nctype = ufunc_type(a.type, binds=a.name, compute_t=compute_t)
    return Binding(
        nctype=ufunc_nctype,
        name=a.name,
        default=None,
        argument=a,
    )
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@dataclass(frozen=True)
class UfunctorBindings:
    # Bindings that go through the host-side functor constructor.
    ctor: List[Binding]
    # Bindings that go through the device-side operator() ("apply") call.
    apply: List[Binding]
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
# ufunctors are a CUDA-only concept representing functors that take some of
# their arguments on a host-side constructor, and the rest in the device-side
# apply.  E.g.,
#
# template <typename scalar_t>
# struct CUDAFunctorOnSelf_add {
#   using opmath_t = at::opmath_type<scalar_t>;
#   opmath_t other_;
#   opmath_t alpha_;
#   CUDAFunctorOnSelf_add(opmath_t other, opmath_t alpha) : other_(other), alpha_(alpha) {}
#   __device__ scalar_t operator()(scalar_t self) {
#     return ufunc::add(static_cast<opmath_t>(self), other_, alpha_);
#   }
# };
#
# The ctor refers to the constructor CUDAFunctorOnSelf_add, while apply refers
# to the operator() definition
def ufunctor_arguments(
    g: NativeFunctionsGroup, *, scalar_tensor_idx: Optional[int], scalar_t: BaseCppType
) -> UfunctorBindings:
    """Split the group's non-out arguments into ctor vs apply bindings.

    `scalar_tensor_idx` counts down over tensor-like arguments; the tensor at
    that index is treated as a partially-applied scalar and routed to the ctor
    instead of apply.  Asserts the index was consumed (or was None).
    """
    ctor = []
    apply = []
    for a in g.functional.func.arguments.flat_non_out:
        if a.type.is_tensor_like():
            if scalar_tensor_idx == 0:
                # put it in the ctor anyway
                ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
                # Mark the scalar-tensor slot as consumed.
                scalar_tensor_idx = None
            else:
                if scalar_tensor_idx is not None:
                    scalar_tensor_idx -= 1
                apply.append(ufunctor_apply_argument(a, scalar_t=scalar_t))
        else:
            # Non-tensor arguments always go through the constructor.
            ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
    assert scalar_tensor_idx is None
    return UfunctorBindings(ctor=ctor, apply=apply)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
# ufuncs are the inner loop template functions that you wrote in ufunc/add.h
# which do the actual computation in question.  E.g.,
#
# template <typename T>
# C10_HOST_DEVICE T add(T self, T other, T alpha) __ubsan_ignore_undefined__ {
#   return self + alpha * other;
# }
#
# In this file, we refer to T as compute_t which is bound by caller
def ufunc_arguments(g: NativeFunctionsGroup, *, compute_t: CType) -> List[Binding]:
    """Bindings for all non-out functional arguments, typed as compute_t."""
    return [
        ufunc_argument(a, compute_t=compute_t)
        for a in g.functional.func.arguments.flat_non_out
    ]
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
# Stubs are the DispatchStub trampolines that CPU kernels use to get to their
# vectorized versions.  E.g.,
#
# using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
# DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
def stub_arguments(g: NativeFunctionsGroup) -> List[Binding]:
    """Bindings for the stub signature: non-tensor out-schema arguments only."""
    # stubs drop all tensor arguments (they are implicit in the TensorIterator
    # argument and keep everything else)
    return [
        r
        for a in g.out.func.arguments.flat_non_out
        if not a.type.is_tensor_like()
        for r in structured.argument(a)
    ]
|
wemm/lib/python3.10/site-packages/torchgen/code_template.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from typing import Mapping, Match, Optional, Sequence
|
| 3 |
+
|
| 4 |
+
# match $identifier or ${identifier} and replace with value in env
|
| 5 |
+
# If this identifier is at the beginning of whitespace on a line
|
| 6 |
+
# and its value is a list then it is treated as
|
| 7 |
+
# block substitution by indenting to that depth and putting each element
|
| 8 |
+
# of the list on its own line
|
| 9 |
+
# if the identifier is on a line starting with non-whitespace and a list
|
| 10 |
+
# then it is comma separated ${,foo} will insert a comma before the list
|
| 11 |
+
# if this list is not empty and ${foo,} will insert one after.
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class CodeTemplate:
    """A `$identifier` / `${identifier}` substitution template.

    If an identifier sits alone after leading whitespace on a line and its
    value is a list, each element is emitted on its own line at that indent.
    Elsewhere, list values are comma-joined; `${,foo}` prepends a comma when
    the list is non-empty and `${foo,}` appends one.
    """

    substitution_str = r"(^[^\n\S]*)?\$([^\d\W]\w*|\{,?[^\d\W]\w*\,?})"
    substitution = re.compile(substitution_str, re.MULTILINE)

    pattern: str
    filename: str

    @staticmethod
    def from_file(filename: str) -> "CodeTemplate":
        """Load a template from a file, remembering its filename."""
        with open(filename, "r") as f:
            return CodeTemplate(f.read(), filename)

    def __init__(self, pattern: str, filename: str = "") -> None:
        self.pattern = pattern
        self.filename = filename

    def substitute(
        self, env: Optional[Mapping[str, object]] = None, **kwargs: object
    ) -> str:
        """Expand the template; kwargs shadow entries in `env`."""
        bindings = {} if env is None else env

        def resolve(name: str) -> object:
            # kwargs take precedence over the env mapping.
            return kwargs[name] if name in kwargs else bindings[name]

        def as_block(prefix: str, items: Sequence[object]) -> str:
            # One line per element (splitting multi-line elements), all at
            # the given indent; trailing whitespace/newline trimmed.
            pieces = []
            for item in items:
                for line in str(item).splitlines():
                    pieces.append(prefix + line + "\n")
            return "".join(pieces).rstrip()

        def expand(m: Match[str]) -> str:
            leading = m.group(1)
            token = m.group(2)
            before = after = ""
            if token.startswith("{"):
                token = token[1:-1]
                if token.startswith(","):
                    before, token = ", ", token[1:]
                if token.endswith(","):
                    after, token = ", ", token[:-1]
            value = resolve(token)
            if leading is not None:
                # Identifier at line start: block substitution at that indent.
                items = value if isinstance(value, list) else [value]
                return as_block(leading, items)
            if isinstance(value, list):
                joined = ", ".join(str(x) for x in value)
                if not value:
                    return joined
                return before + joined + after
            return str(value)

        return self.substitution.sub(expand, self.pattern)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# Smoke-test / demo: exercises plain, block-indented, and comma substitutions.
if __name__ == "__main__":
    c = CodeTemplate(
        """\
int foo($args) {

    $bar
    $bar
    $a+$b
}
int commatest(int a${,stuff})
int notest(int a${,empty,})
"""
    )
    print(
        c.substitute(
            args=["hi", 8],
            bar=["what", 7],
            a=3,
            b=4,
            stuff=["things...", "others"],
            empty=[],
        )
    )
|
wemm/lib/python3.10/site-packages/torchgen/context.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
from typing import Callable, Dict, Iterator, Optional, TypeVar, Union
|
| 5 |
+
|
| 6 |
+
import torchgen.local as local
|
| 7 |
+
from torchgen.model import (
|
| 8 |
+
BackendIndex,
|
| 9 |
+
DispatchKey,
|
| 10 |
+
NativeFunction,
|
| 11 |
+
NativeFunctionsGroup,
|
| 12 |
+
NativeFunctionsViewGroup,
|
| 13 |
+
)
|
| 14 |
+
from torchgen.utils import context, S, T
|
| 15 |
+
|
| 16 |
+
# Helper functions for defining generators on things in the model

# Constrained TypeVar for the single model-object argument accepted by the
# decorators below.
F = TypeVar(
    "F",
    NativeFunction,
    NativeFunctionsGroup,
    NativeFunctionsViewGroup,
    Union[NativeFunction, NativeFunctionsGroup],
    Union[NativeFunction, NativeFunctionsViewGroup],
)

# Constrained TypeVar for the second argument of with_native_function_and.
F2 = TypeVar(
    "F2",
    NativeFunction,
    NativeFunctionsGroup,
    Optional[NativeFunction],
    bool,
    str,
)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@contextlib.contextmanager
def native_function_manager(
    g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup, NativeFunction]
) -> Iterator[None]:
    """Context manager that scopes error reporting and `local` parametrization
    to a specific native function (picking a representative for groups)."""
    if isinstance(g, NativeFunctionsGroup):
        # By default, we associate all errors with structured native functions
        # with the out variant. In some cases, it might be better to have
        # a more specific place to hang things; if so, use
        # native_function_manager again on the inside
        f = g.out
    elif isinstance(g, NativeFunctionsViewGroup):
        # We associate errors with the view operator
        f = g.view
    else:
        f = g
    with context(lambda: f"in native_functions.yaml line {f.loc}:\n {f.func}"):
        with local.parametrize(
            use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
            use_ilistref_for_tensor_lists=f.part_of_structured_group,
        ):
            yield
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# Given a function that operates on NativeFunction, wrap it into a new function
# that sets some appropriate context managers for that native function.
# YOU MUST WRAP FUNCTIONS IN THIS for calls to api modules to be sound
# (you will get an error if we try to access the local variables without having
# set them).
def with_native_function(func: Callable[[F], T]) -> Callable[[F], T]:
    """Decorator: run `func` under native_function_manager for its argument."""

    @functools.wraps(func)
    def inner(f: F) -> T:
        with native_function_manager(f):
            return func(f)

    return inner
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def with_native_function_and(func: Callable[[F, F2], T]) -> Callable[[F, F2], T]:
    """Two-argument variant of ``with_native_function``.

    Only the first argument establishes the diagnostic/local context.
    """

    @functools.wraps(func)
    def scoped(fn: F, extra: F2) -> T:
        # The first native_function is assumed to be the one with the appropriate context.
        with native_function_manager(fn):
            return func(fn, extra)

    return scoped
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def method_with_native_function(func: Callable[[S, F], T]) -> Callable[[S, F], T]:
    """Method-flavored ``with_native_function``: first parameter is ``self``."""

    @functools.wraps(func)
    def scoped(self_obj: S, fn: F) -> T:
        with native_function_manager(fn):
            return func(self_obj, fn)

    return scoped
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
# Convenience decorator for functions that explicitly take in a BackendIndex,
# instead of indirectly taking one in as a closure
def with_native_function_and_index(
    func: Callable[[F, BackendIndex], T]
) -> Callable[[F, BackendIndex], T]:
    """Like ``with_native_function`` for callables taking a BackendIndex."""

    @functools.wraps(func)
    def scoped(fn: F, backend_index: BackendIndex) -> T:
        with native_function_manager(fn):
            return func(fn, backend_index)

    return scoped
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# Convenience decorator for functions that explicitly take in a Dict of BackendIndices
def with_native_function_and_indices(
    func: Callable[[F, Dict[DispatchKey, BackendIndex]], T]
) -> Callable[[F, Dict[DispatchKey, BackendIndex]], T]:
    """Like ``with_native_function`` for callables taking all backend indices."""

    @functools.wraps(func)
    def scoped(fn: F, backend_indices: Dict[DispatchKey, BackendIndex]) -> T:
        with native_function_manager(fn):
            return func(fn, backend_indices)

    return scoped
|
wemm/lib/python3.10/site-packages/torchgen/dest/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (650 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/dest/__pycache__/lazy_ir.cpython-310.pyc
ADDED
|
Binary file (23.4 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/dest/__pycache__/native_functions.cpython-310.pyc
ADDED
|
Binary file (2.22 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/dest/__pycache__/register_dispatch_key.cpython-310.pyc
ADDED
|
Binary file (23.9 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/dest/__pycache__/ufunc.cpython-310.pyc
ADDED
|
Binary file (14 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/dest/lazy_ts_lowering.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torchgen.api.lazy import LazyIrSchema
|
| 2 |
+
from torchgen.api.types import OptionalCType
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def ts_lowering_body(schema: LazyIrSchema) -> str:
    """Generate the C++ body of a TorchScript lowering for ``schema``.

    The emitted code collects positional and keyword NamedValues and
    forwards them to ``torch::lazy::LowerTSBuiltin``.
    """
    # for now, we just want one IR class decl and soon after also the method defs
    # and we use the functional version not out/inplace.
    emplace_arguments = []
    # Lazy values are pulled from the lowering context by operand index; the
    # `i` counter in the generated C++ is shared across all operand fetches.
    for arg in schema.positional_args:
        if arg.is_lazy_value:
            if isinstance(arg.lazy_type, OptionalCType):
                # Optional lazy values only consume an operand when present.
                emplace_arguments.append(
                    f"has_{arg.name} ? loctx->GetOutputOp(operand(i++)) : nullptr"
                )
                continue
            emplace_arguments.append("loctx->GetOutputOp(operand(i++))")
            continue
        # Non-lazy positional args are passed through by name and value.
        emplace_arguments.append(f'"{arg.name}", {arg.name}')

    emplace_arguments_str = "\n ".join(
        [f"arguments.emplace_back({a});" for a in emplace_arguments]
    )
    # Keyword values also consume operands; keyword scalars pass through.
    emplace_kwarg_values = [
        f'"{arg.name}", loctx->GetOutputOp(operand(i++))'
        for arg in schema.keyword_values
    ]
    emplace_kwarg_scalars = [
        f'"{arg.name}", {arg.name}' for arg in schema.keyword_scalars
    ]
    emplace_kwarguments = "\n ".join(
        [
            f"kwarguments.emplace_back({a});"
            for a in emplace_kwarg_values + emplace_kwarg_scalars
        ]
    )
    return f"""\
std::vector<torch::jit::NamedValue> arguments;
std::vector<torch::jit::NamedValue> kwarguments;
arguments.reserve({len(emplace_arguments)});
kwarguments.reserve({len(emplace_kwarg_values + emplace_kwarg_scalars)});
size_t i = 0;
{emplace_arguments_str}
{emplace_kwarguments}
torch::lazy::TSOpVector {schema.aten_name}_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
TORCH_CHECK_EQ({schema.aten_name}_out.size(), {len(schema.returns)});

return {schema.aten_name}_out;
"""
|
wemm/lib/python3.10/site-packages/torchgen/dest/native_functions.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Optional, Union
|
| 2 |
+
|
| 3 |
+
import torchgen.api.meta as meta
|
| 4 |
+
import torchgen.api.structured as structured
|
| 5 |
+
from torchgen.api.types import kernel_signature
|
| 6 |
+
|
| 7 |
+
from torchgen.context import with_native_function_and_index
|
| 8 |
+
from torchgen.model import BackendIndex, NativeFunction, NativeFunctionsGroup
|
| 9 |
+
from torchgen.utils import mapMaybe
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@with_native_function_and_index
def gen_unstructured(f: NativeFunction, backend_index: BackendIndex) -> Optional[str]:
    """Forward declaration for the unstructured kernel of ``f``, or None.

    None is returned when the backend registers no kernel for ``f`` or the
    registered kernel name is a ``legacy::`` one.
    """
    sig = kernel_signature(f, backend_index)
    metadata = backend_index.get_kernel(f)
    if metadata is None:
        return None
    if "legacy::" in metadata.kernel:
        return None
    prefix = "static" if backend_index.external else "TORCH_API"
    return f"{prefix} {sig.decl(name=metadata.kernel)};"
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@with_native_function_and_index
def gen_structured(g: NativeFunctionsGroup, backend_index: BackendIndex) -> List[str]:
    """Declaration of the structured-kernel impl struct for ``g``.

    Returns an empty list when the backend registers no kernel for ``g``.
    """
    meta_name = meta.name(g)
    out_args = structured.impl_arguments(g)
    metadata = backend_index.get_kernel(g)
    if metadata is None:
        return []
    # External backends do not get the TORCH_API export macro.
    prefix = "" if backend_index.external else "TORCH_API "
    return [
        f"""\
struct {prefix}structured_{metadata.kernel} : public at::meta::structured_{meta_name} {{
void impl({', '.join(a.decl() for a in out_args)});
}};
"""
    ]
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Generates NativeFunctions.h, a list of forward declarations of all
# actual kernel definitions we keep in aten/src/ATen/native/
@with_native_function_and_index
def compute_native_function_declaration(
    g: Union[NativeFunctionsGroup, NativeFunction], backend_index: BackendIndex
) -> List[str]:
    """Forward declaration(s) for ``g`` on the given backend."""
    metadata = backend_index.get_kernel(g)
    if not isinstance(g, NativeFunctionsGroup):
        # A lone NativeFunction: at most one unstructured declaration.
        decl = gen_unstructured(g, backend_index)
        return [] if decl is None else [decl]
    if metadata is not None and metadata.structured:
        if backend_index.external:
            # Structured hasn't been tested with external backends yet.
            raise AssertionError(
                "Structured external backend functions are not implemented yet."
            )
        return gen_structured(g, backend_index)
    # Unstructured group: declare each member function individually.
    return list(
        mapMaybe(lambda f: gen_unstructured(f, backend_index), g.functions())
    )
|
wemm/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc
ADDED
|
Binary file (3.78 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py
ADDED
|
@@ -0,0 +1,368 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Optional, Sequence, Set, Union
|
| 2 |
+
|
| 3 |
+
from torchgen import local
|
| 4 |
+
from torchgen.api.types import (
|
| 5 |
+
ArgName,
|
| 6 |
+
ArrayCType,
|
| 7 |
+
BaseCType,
|
| 8 |
+
Binding,
|
| 9 |
+
ConstRefCType,
|
| 10 |
+
CType,
|
| 11 |
+
MutRefCType,
|
| 12 |
+
NamedCType,
|
| 13 |
+
SpecialArgName,
|
| 14 |
+
TupleCType,
|
| 15 |
+
VectorCType,
|
| 16 |
+
voidT,
|
| 17 |
+
)
|
| 18 |
+
from torchgen.model import (
|
| 19 |
+
Argument,
|
| 20 |
+
Arguments,
|
| 21 |
+
BaseTy,
|
| 22 |
+
BaseType,
|
| 23 |
+
ListType,
|
| 24 |
+
NativeFunction,
|
| 25 |
+
OptionalType,
|
| 26 |
+
Return,
|
| 27 |
+
SelfArgument,
|
| 28 |
+
TensorOptionsArguments,
|
| 29 |
+
Type,
|
| 30 |
+
)
|
| 31 |
+
from torchgen.utils import assert_never
|
| 32 |
+
from .types import (
|
| 33 |
+
ArrayRefCType,
|
| 34 |
+
BaseTypeToCppMapping,
|
| 35 |
+
OptionalCType,
|
| 36 |
+
scalarT,
|
| 37 |
+
tensorListT,
|
| 38 |
+
tensorT,
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
"""
|
| 42 |
+
This file describes the translation of JIT schema to the public C++ API, which is what people use when they call
|
| 43 |
+
functions like at::add. It also serves as a native function API, which is the signature of kernels,
|
| 44 |
+
since in Executorch CppSignature is the same as NativeSignature.
|
| 45 |
+
|
| 46 |
+
Difference between this file and torchgen.api.cpp.py:
|
| 47 |
+
|
| 48 |
+
- Executorch doesn't support TensorOptions, however in this file we still keep the logic here to be compatible with
|
| 49 |
+
torchgen.api.cpp, so that we can do stuff like ATen mode (running ATen kernels in Executorch).
|
| 50 |
+
|
| 51 |
+
- Executorch doesn't support Dimname.
|
| 52 |
+
|
| 53 |
+
- Executorch runtime doesn't support SymInt, will treat it as int.
|
| 54 |
+
"""
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Translation of "value types" in JIT schema to C++ API type. Value
|
| 58 |
+
# types look the same no matter if they are argument types or return
|
| 59 |
+
# types. Returns None if the type in question is not a value type.
|
| 60 |
+
def valuetype_type(
|
| 61 |
+
t: Type,
|
| 62 |
+
*,
|
| 63 |
+
binds: ArgName,
|
| 64 |
+
remove_non_owning_ref_types: bool = False,
|
| 65 |
+
) -> Optional[NamedCType]:
|
| 66 |
+
if isinstance(t, BaseType):
|
| 67 |
+
if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar:
|
| 68 |
+
return None
|
| 69 |
+
# For SymInt we simply treat it as int.
|
| 70 |
+
elif str(t) == "SymInt":
|
| 71 |
+
return NamedCType(binds, BaseCType(BaseTypeToCppMapping[BaseTy.int]))
|
| 72 |
+
if remove_non_owning_ref_types:
|
| 73 |
+
if t.name == BaseTy.str:
|
| 74 |
+
raise AssertionError(
|
| 75 |
+
"string ref->value conversion: not implemented yet"
|
| 76 |
+
)
|
| 77 |
+
# All other BaseType currently map directly to BaseCppTypes.
|
| 78 |
+
return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name]))
|
| 79 |
+
elif isinstance(t, OptionalType):
|
| 80 |
+
elem = valuetype_type(t.elem, binds=binds)
|
| 81 |
+
if elem is None:
|
| 82 |
+
return None
|
| 83 |
+
return NamedCType(binds, OptionalCType(elem.type))
|
| 84 |
+
elif isinstance(t, ListType):
|
| 85 |
+
if str(t.elem) == "bool":
|
| 86 |
+
assert t.size is not None
|
| 87 |
+
return NamedCType(
|
| 88 |
+
binds, ArrayCType(BaseCType(BaseTypeToCppMapping[BaseTy.bool]), t.size)
|
| 89 |
+
)
|
| 90 |
+
else:
|
| 91 |
+
return None
|
| 92 |
+
else:
|
| 93 |
+
raise AssertionError(f"unrecognized type {repr(t)}")
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# Translation of types occurring in JIT arguments to a C++ argument type.
# If remove_non_owning_ref_types is set, we'll guarantee that the output CType is not a non-owning reference type.
# For example, we'll return std::vector<int> instead of IntArrayRef.
# See Note [translation from C++ reference to value types]
def argumenttype_type(
    t: Type,
    *,
    mutable: bool,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
) -> NamedCType:
    """Translate a JIT argument type to its C++ argument type.

    ``mutable`` selects mutable-reference vs const-reference treatment for
    Tensor-like types (subject to the use_const_ref_for_mutable_tensors
    local setting).
    """
    # If it's a value type, do the value type translation
    r = valuetype_type(
        t,
        binds=binds,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )
    if r is not None:
        return r
    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(binds, MutRefCType(BaseCType(tensorT)))
            else:
                return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            # valuetype_type above should have covered every other BaseType.
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if str(t.elem) == "Tensor":
            if mutable and not local.use_const_ref_for_mutable_tensors():
                # Mutable optional Tensor drops the optional wrapper.
                return NamedCType(
                    binds, MutRefCType(BaseCType(tensorT))
                )  # TODO: fix this discrepancy
            else:
                return NamedCType(
                    binds, ConstRefCType(OptionalCType(BaseCType(tensorT)))
                )
        elif str(t.elem) == "Scalar":
            return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        # TODO: keeping these special cases for Tensor[] and Tensor?[] so that we can hookup with ATen kernels.
        if str(t.elem) == "Tensor":
            return NamedCType(binds, BaseCType(tensorListT))
        elif str(t.elem) == "Dimname":
            raise NotImplementedError("Executorch doesn't support Dimname")
        elif str(t.elem) == "Tensor?":
            return NamedCType(binds, ArrayRefCType(OptionalCType(BaseCType(tensorT))))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
# Translate a JIT argument into its C++ type
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
    # Mutability comes from the schema alias annotation (Argument.is_write).
    return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
# Translation of a (non-multi) return type from JIT to C++
# N.B: returntype_type returns a CType, not a NamedCType.
# This is mostly because of the mismatch between return types and return names.
# e.g. a function with a return type of 'void' has 0 return names,
# and a function with a return type of 'std::tuple' has >1 return name.
def returntype_type(t: Type, *, mutable: bool) -> CType:
    """Translate a single JIT return type to a C++ CType."""
    # placeholder is ignored
    r = valuetype_type(t, binds="__placeholder__")
    if r is not None:
        return r.type

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable:
                if local.use_const_ref_for_mutable_tensors():
                    return ConstRefCType(BaseCType(tensorT))
                else:
                    return MutRefCType(BaseCType(tensorT))
            else:
                # Note [Tensor Copy Returns]
                # Currently, we use "Argument.is_write" to determine
                # whether or not Tensor return types should be copies or references.
                # If that ever changes, take a look at other locations of this note!
                return BaseCType(tensorT)
        elif t.name == BaseTy.Scalar:
            return BaseCType(scalarT)
    elif isinstance(t, ListType):
        assert (
            not mutable
        ), "Native functions should never return a mutable tensor list. They should return void."
        elem = returntype_type(t.elem, mutable=False)
        assert t.size is None, f"fixed size list returns not supported: {t}"
        return VectorCType(elem)

    raise AssertionError(f"unrecognized return type {t}")
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
# Translation of a single return to its C++ type
def return_type(r: Return) -> CType:
    # As with arguments, mutability is driven by the alias annotation.
    return returntype_type(r.type, mutable=r.is_write)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
# Translation of a full (possibly multi) return from JIT to its C++ type
def returns_type(rs: Sequence[Return]) -> CType:
    """C++ type for the whole return: void, a single type, or a tuple."""
    if not rs:
        return BaseCType(voidT)
    if len(rs) == 1:
        return return_type(rs[0])
    return TupleCType([return_type(r) for r in rs])
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]:
    """Compute the C++-visible name for each return of ``f``.

    Names come from (in priority order): the implicit ``self`` of inplace
    functions, the out arguments of out-variant functions, explicit return
    names from the schema (suffixed on conflict), or ``fallback_name``
    (indexed for multi-returns).
    """
    returns: List[str] = []
    for i, r in enumerate(f.func.returns):
        # If we have an inplace function, the return argument is
        # implicitly named self.
        # TODO: Consider incorporating this into the data model
        if f.func.name.name.inplace:
            assert i == 0, "illegal inplace function with multiple returns"
            name = "self"
        # If we are out function, the name is the name of the
        # corresponding output function (r.name will get recorded
        # in field_name later.)
        elif f.func.is_out_fn():
            name = f.func.arguments.out[i].name
        # If the return argument is explicitly named...
        elif r.name:
            name_conflict = any(
                r.name == a.name for a in f.func.schema_order_arguments()
            )
            # NOTE(review): is_out_fn() is necessarily False on this branch
            # (handled above), so the second conjunct is always True here.
            if name_conflict and not f.func.is_out_fn():
                name = f"{r.name}_return"
            else:
                name = r.name
        # If there is no explicit name and no fallback name was passed in, we just name the output result,
        # unless it's a multi-return, in which case it's result0,
        # result1, etc (zero-indexed)
        else:
            name = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{i}"
        returns.append(name)
    return returns
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# Map from JIT schema default literals to their C++ spellings.
# NOTE(review): these entries use the `torch::executorch::` namespace while
# default_expr below emits `torch::executor::nullopt` — confirm which
# namespace the Executorch runtime actually exposes; one of the two looks
# like a search-and-replace artifact.
JIT_TO_CPP_DEFAULT = {
    "False": "false",
    "True": "true",
    "None": "torch::executorch::nullopt",  # UGH this one is type directed
    "[]": "{}",
    "contiguous_format": "torch::executorch::MemoryFormat::Contiguous",
    "long": "torch::executorch::kLong",
}
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
# Convert a JIT default into C++ expression representing the default
def default_expr(d: str, t: Type) -> str:
    """Render the schema default literal ``d`` as a C++ expression for type ``t``."""
    if d == "None" and str(t) == "Tensor?":
        return "{}"
    if isinstance(t, BaseType) and t.name is BaseTy.str:
        # Schema allows single quotes but C++ needs double
        if len(d) >= 2 and d[0] == "'" and d[-1] == "'":
            # Walk the interior of the single-quoted literal, re-escaping
            # double quotes and un-escaping \' (which C++ doesn't need).
            s = ""
            i = 1
            while i + 1 < len(d):
                if d[i] != "\\":
                    if d[i] == '"':
                        s += '\\"'
                    else:
                        s += d[i]
                    i += 1
                else:
                    # Escape sequence: drop the backslash for \', keep
                    # every other sequence verbatim.
                    if d[i + 1] == "'":
                        s += "'"
                    else:
                        s += d[i : i + 2]
                    i += 2

            return f'"{s}"'

    if isinstance(t, OptionalType):
        if d == "None":
            # NOTE(review): `torch::executor::` here vs `torch::executorch::`
            # in JIT_TO_CPP_DEFAULT — confirm the correct runtime namespace.
            return "torch::executor::nullopt"

        return default_expr(d, t.elem)

    if isinstance(t, ListType):
        if d.startswith("[") and d.endswith("]"):
            return "{" + d[1:-1] + "}"
        elif t.size is None:
            # NOTE: Sized lists can have scalar defaults
            raise ValueError(f"Expected a list default '[...]' but found: '{d}'")

    return JIT_TO_CPP_DEFAULT.get(d, d)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
# Convert an argument into its C++ API form


def argument(
    a: Union[Argument, TensorOptionsArguments, SelfArgument],
    *,
    cpp_no_default_args: Set[str],
    method: bool,
    faithful: bool,
    has_tensor_options: bool,
) -> List[Binding]:
    """Lower one schema argument to its C++ binding(s).

    Returns a list because an argument may expand to zero bindings (the
    implicit ``self`` of a method) or, in principle, several.
    """

    # Recurse with the same keyword configuration (used for SelfArgument).
    def sub_argument(
        a: Union[Argument, TensorOptionsArguments, SelfArgument]
    ) -> List[Binding]:
        return argument(
            a,
            cpp_no_default_args=cpp_no_default_args,
            method=method,
            faithful=faithful,
            has_tensor_options=has_tensor_options,
        )

    if isinstance(a, Argument):
        binds: ArgName
        # memory_format next to TensorOptions may duplicate information,
        # so it gets a special bind name.
        if a.name == "memory_format" and has_tensor_options:
            binds = SpecialArgName.possibly_redundant_memory_format
        else:
            binds = a.name
        default: Optional[str] = None
        if a.name not in cpp_no_default_args and a.default is not None:
            default = default_expr(a.default, a.type)
        return [
            Binding(
                nctype=argument_type(a, binds=binds),
                name=a.name,
                default=default,
                argument=a,
            )
        ]
    elif isinstance(a, TensorOptionsArguments):
        raise NotImplementedError("Need to implement type resolution for TensorOptions")
    elif isinstance(a, SelfArgument):
        if method:
            # Caller is responsible for installing implicit this in context!
            return []
        else:
            return sub_argument(a.argument)
    else:
        assert_never(a)
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
def arguments(
    arguments: Arguments,
    *,
    faithful: bool,
    method: bool,
    cpp_no_default_args: Set[str],
) -> List[Binding]:
    """Compute the C++ bindings for every argument of a signature.

    Faithful signatures keep schema order (out arguments last) and drop C++
    default values; the public convention puts out arguments first.
    """
    ordered: List[Union[Argument, TensorOptionsArguments, SelfArgument]]
    if faithful:
        ordered = [*arguments.non_out, *arguments.out]
    else:
        ordered = [*arguments.out, *arguments.non_out]
    bindings: List[Binding] = []
    for a in ordered:
        for b in argument(
            a,
            faithful=faithful,
            method=method,
            has_tensor_options=arguments.tensor_options is not None,
            cpp_no_default_args=cpp_no_default_args,
        ):
            bindings.append(b.no_default() if faithful else b)
    return bindings
|
wemm/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .types import *
|
| 2 |
+
from .signatures import * # isort:skip
|